[llvm] ff9af4c - [CodeGen] Convert tests to opaque pointers (NFC)

Nikita Popov via llvm-commits <llvm-commits at lists.llvm.org>
Mon Feb 5 05:07:26 PST 2024


Author: Nikita Popov
Date: 2024-02-05T14:07:09+01:00
New Revision: ff9af4c43ad71eeba2cabe99609cfaa0fd54c1d0

URL: https://github.com/llvm/llvm-project/commit/ff9af4c43ad71eeba2cabe99609cfaa0fd54c1d0
DIFF: https://github.com/llvm/llvm-project/commit/ff9af4c43ad71eeba2cabe99609cfaa0fd54c1d0.diff

LOG: [CodeGen] Convert tests to opaque pointers (NFC)
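
This commit mechanically rewrites the listed tests to the opaque pointer IR
form: every typed pointer type becomes "ptr", pointee types drop out of the
intrinsic name mangling (e.g. llvm.memcpy.p0i8.p0i8.i64 becomes
llvm.memcpy.p0.p0.i64), and zero-index getelementptr constant expressions on
globals fold away. A minimal before/after sketch of the pattern, using the
placeholder names @g and %dst rather than anything from the modified tests:

    ; sketch only: @g is a hypothetical [4 x i8] global, %dst a hypothetical i8 pointer
    ; typed pointers (old form)
    call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g, i64 0, i64 0), i64 4, i1 false)
    ; opaque pointers (new form)
    call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr @g, i64 4, i1 false)

In the hunks below only the IR inputs of the tests are rewritten; the CHECK
lines are left untouched, consistent with the NFC tag.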

Added: 
    

Modified: 
    llvm/test/CodeGen/BPF/BTF/builtin-btf-type-id.ll
    llvm/test/CodeGen/BPF/BTF/static-var-zerolen-array.ll
    llvm/test/CodeGen/BPF/BTF/type-tag-fixup-fwd.ll
    llvm/test/CodeGen/BPF/BTF/type-tag-fixup-resolved.ll
    llvm/test/CodeGen/BPF/CORE/offset-reloc-cast-array-1.ll
    llvm/test/CodeGen/BPF/CORE/offset-reloc-cast-array-2.ll
    llvm/test/CodeGen/BPF/CORE/offset-reloc-cast-struct-3.ll
    llvm/test/CodeGen/BPF/CORE/offset-reloc-cast-union-1.ll
    llvm/test/CodeGen/BPF/CORE/offset-reloc-cast-union-2.ll
    llvm/test/CodeGen/BPF/ex1.ll
    llvm/test/CodeGen/BPF/reloc.ll
    llvm/test/CodeGen/BPF/remove_truncate_3.ll
    llvm/test/CodeGen/BPF/sockex2.ll
    llvm/test/CodeGen/BPF/xadd.ll
    llvm/test/CodeGen/BPF/xadd_legal.ll
    llvm/test/CodeGen/Generic/DbgValueAggregate.ll
    llvm/test/CodeGen/Generic/MIRDebugify/check-line-and-variables-x.mir
    llvm/test/CodeGen/Generic/MIRDebugify/check-line-and-variables.mir
    llvm/test/CodeGen/Generic/MIRStripDebug/all.mir
    llvm/test/CodeGen/Generic/MIRStripDebug/dont-strip-real-debug-info.mir
    llvm/test/CodeGen/Generic/MIRStripDebug/multiple-moduleflags.mir
    llvm/test/CodeGen/Hexagon/autohvx/fsplat.ll
    llvm/test/CodeGen/Hexagon/autohvx/hfsplat.ll
    llvm/test/CodeGen/Hexagon/cmpy-round.ll
    llvm/test/CodeGen/Hexagon/const-pool-tf.ll
    llvm/test/CodeGen/Hexagon/debug-prologue-loc.ll
    llvm/test/CodeGen/Hexagon/fixed-spill-mutable.ll
    llvm/test/CodeGen/Hexagon/machine-sink-float-usr.mir
    llvm/test/CodeGen/Hexagon/memcpy-likely-aligned.ll
    llvm/test/CodeGen/Hexagon/swp-carried-dep1.mir
    llvm/test/CodeGen/Hexagon/swp-carried-dep2.mir
    llvm/test/CodeGen/Hexagon/swp-memrefs-epilog.ll
    llvm/test/CodeGen/Hexagon/swp-new-phi.ll
    llvm/test/CodeGen/Hexagon/v5_insns.ll
    llvm/test/CodeGen/Hexagon/v60Vasr.ll
    llvm/test/CodeGen/Hexagon/vdmpy-halide-test.ll
    llvm/test/CodeGen/Hexagon/vect-regpairs.ll
    llvm/test/CodeGen/Hexagon/vect_setcc_v2i16.ll
    llvm/test/CodeGen/Hexagon/vmpa-halide-test.ll
    llvm/test/CodeGen/Lanai/codemodel.ll
    llvm/test/CodeGen/Lanai/inlineasm-output-template.ll
    llvm/test/CodeGen/Lanai/lanai-misched-trivial-disjoint.ll
    llvm/test/CodeGen/Lanai/mem_alu_combiner.ll
    llvm/test/CodeGen/Lanai/peephole-compare.mir
    llvm/test/CodeGen/Lanai/set_and_hi.ll
    llvm/test/CodeGen/Lanai/sub-cmp-peephole.ll
    llvm/test/CodeGen/Lanai/subword.ll
    llvm/test/CodeGen/LoongArch/frame.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-ld-invalid-imm.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-ld-non-imm.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-ld.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-ldrepl-invalid-imm.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-ldrepl-non-imm.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-ldrepl.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-st-invalid-imm.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-st-non-imm.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-st.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-stelm-invalid-imm.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-stelm-non-imm.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-stelm.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-ld-invalid-imm.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-ld-non-imm.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-ld.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-ldrepl-invalid-imm.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-ldrepl-non-imm.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-ldrepl.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-st-invalid-imm.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-st-non-imm.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-st.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-stelm-invalid-imm.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-stelm-non-imm.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-stelm.ll
    llvm/test/CodeGen/LoongArch/tail-calls.ll
    llvm/test/CodeGen/MIR/AArch64/expected-target-flag-name.mir
    llvm/test/CodeGen/MIR/AArch64/invalid-target-flag-name.mir
    llvm/test/CodeGen/MIR/AArch64/machine-metadata-error.mir
    llvm/test/CodeGen/MIR/AArch64/machine-metadata.mir
    llvm/test/CodeGen/MIR/AArch64/stack-object-local-offset.mir
    llvm/test/CodeGen/MIR/AArch64/swp.mir
    llvm/test/CodeGen/MIR/AArch64/target-flags.mir
    llvm/test/CodeGen/MIR/AArch64/unnamed-stack.ll
    llvm/test/CodeGen/MIR/AMDGPU/expected-target-index-name.mir
    llvm/test/CodeGen/MIR/AMDGPU/invalid-target-index-operand.mir
    llvm/test/CodeGen/MIR/AMDGPU/machine-metadata-error.mir
    llvm/test/CodeGen/MIR/AMDGPU/machine-metadata.mir
    llvm/test/CodeGen/MIR/AMDGPU/mircanon-memoperands.mir
    llvm/test/CodeGen/MIR/AMDGPU/syncscopes.mir
    llvm/test/CodeGen/MIR/AMDGPU/target-index-operands.mir
    llvm/test/CodeGen/MIR/ARM/cfi-same-value.mir
    llvm/test/CodeGen/MIR/ARM/expected-closing-brace.mir
    llvm/test/CodeGen/MIR/ARM/thumb2-sub-sp-t3.mir
    llvm/test/CodeGen/MIR/Generic/frame-info.mir
    llvm/test/CodeGen/MIR/Generic/llvm-ir-error-reported.mir
    llvm/test/CodeGen/MIR/Mips/memory-operands.mir
    llvm/test/CodeGen/MIR/Mips/setRegClassOrRegBank.mir
    llvm/test/CodeGen/MIR/PowerPC/unordered-implicit-registers.mir
    llvm/test/CodeGen/MIR/X86/block-address-operands.mir
    llvm/test/CodeGen/MIR/X86/branch-folder-with-label.mir
    llvm/test/CodeGen/MIR/X86/callee-saved-info.mir
    llvm/test/CodeGen/MIR/X86/diexpr-win32.mir
    llvm/test/CodeGen/MIR/X86/duplicate-memory-operand-flag.mir
    llvm/test/CodeGen/MIR/X86/expected-align-in-memory-operand.mir
    llvm/test/CodeGen/MIR/X86/expected-alignment-after-align-in-memory-operand.mir
    llvm/test/CodeGen/MIR/X86/expected-block-reference-in-blockaddress.mir
    llvm/test/CodeGen/MIR/X86/expected-comma-after-memory-operand.mir
    llvm/test/CodeGen/MIR/X86/expected-different-implicit-operand.mir
    llvm/test/CodeGen/MIR/X86/expected-different-implicit-register-flag.mir
    llvm/test/CodeGen/MIR/X86/expected-function-reference-after-blockaddress.mir
    llvm/test/CodeGen/MIR/X86/expected-global-value-after-blockaddress.mir
    llvm/test/CodeGen/MIR/X86/expected-integer-after-offset-sign.mir
    llvm/test/CodeGen/MIR/X86/expected-load-or-store-in-memory-operand.mir
    llvm/test/CodeGen/MIR/X86/expected-metadata-node-after-debug-location.mir
    llvm/test/CodeGen/MIR/X86/expected-metadata-node-after-exclaim.mir
    llvm/test/CodeGen/MIR/X86/expected-metadata-node-in-stack-object.mir
    llvm/test/CodeGen/MIR/X86/expected-named-register-in-callee-saved-register.mir
    llvm/test/CodeGen/MIR/X86/expected-number-after-bb.mir
    llvm/test/CodeGen/MIR/X86/expected-pointer-value-in-memory-operand.mir
    llvm/test/CodeGen/MIR/X86/expected-positive-alignment-after-align.mir
    llvm/test/CodeGen/MIR/X86/expected-size-integer-after-memory-operation.mir
    llvm/test/CodeGen/MIR/X86/expected-size-integer-after-memory-operation2.mir
    llvm/test/CodeGen/MIR/X86/expected-stack-object.mir
    llvm/test/CodeGen/MIR/X86/expected-target-flag-name.mir
    llvm/test/CodeGen/MIR/X86/expected-value-in-memory-operand.mir
    llvm/test/CodeGen/MIR/X86/external-symbol-operands.mir
    llvm/test/CodeGen/MIR/X86/fixed-stack-di.mir
    llvm/test/CodeGen/MIR/X86/fixed-stack-memory-operands.mir
    llvm/test/CodeGen/MIR/X86/fixed-stack-objects.mir
    llvm/test/CodeGen/MIR/X86/frame-info-save-restore-points.mir
    llvm/test/CodeGen/MIR/X86/frame-info-stack-references.mir
    llvm/test/CodeGen/MIR/X86/global-value-operands.mir
    llvm/test/CodeGen/MIR/X86/instr-heap-alloc-operands.mir
    llvm/test/CodeGen/MIR/X86/instr-pcsections.mir
    llvm/test/CodeGen/MIR/X86/instructions-debug-location.mir
    llvm/test/CodeGen/MIR/X86/invalid-metadata-node-type.mir
    llvm/test/CodeGen/MIR/X86/invalid-target-flag-name.mir
    llvm/test/CodeGen/MIR/X86/large-index-number-error.mir
    llvm/test/CodeGen/MIR/X86/large-offset-number-error.mir
    llvm/test/CodeGen/MIR/X86/large-size-in-memory-operand-error.mir
    llvm/test/CodeGen/MIR/X86/machine-basic-block-operands.mir
    llvm/test/CodeGen/MIR/X86/machine-metadata-error.mir
    llvm/test/CodeGen/MIR/X86/machine-metadata.mir
    llvm/test/CodeGen/MIR/X86/machine-verifier-address.mir
    llvm/test/CodeGen/MIR/X86/metadata-operands.mir
    llvm/test/CodeGen/MIR/X86/missing-closing-quote.mir
    llvm/test/CodeGen/MIR/X86/missing-implicit-operand.mir
    llvm/test/CodeGen/MIR/X86/null-register-operands.mir
    llvm/test/CodeGen/MIR/X86/pr38773.mir
    llvm/test/CodeGen/MIR/X86/register-operands-target-flag-error.mir
    llvm/test/CodeGen/MIR/X86/spill-slot-fixed-stack-object-aliased.mir
    llvm/test/CodeGen/MIR/X86/spill-slot-fixed-stack-object-immutable.mir
    llvm/test/CodeGen/MIR/X86/spill-slot-fixed-stack-objects.mir
    llvm/test/CodeGen/MIR/X86/stack-object-debug-info.mir
    llvm/test/CodeGen/MIR/X86/stack-object-invalid-name.mir
    llvm/test/CodeGen/MIR/X86/stack-object-operand-name-mismatch-error.mir
    llvm/test/CodeGen/MIR/X86/stack-object-operands.mir
    llvm/test/CodeGen/MIR/X86/stack-object-redefinition-error.mir
    llvm/test/CodeGen/MIR/X86/stack-objects.mir
    llvm/test/CodeGen/MIR/X86/undefined-fixed-stack-object.mir
    llvm/test/CodeGen/MIR/X86/undefined-global-value.mir
    llvm/test/CodeGen/MIR/X86/undefined-ir-block-in-blockaddress.mir
    llvm/test/CodeGen/MIR/X86/undefined-ir-block-slot-in-blockaddress.mir
    llvm/test/CodeGen/MIR/X86/undefined-named-global-value.mir
    llvm/test/CodeGen/MIR/X86/undefined-stack-object.mir
    llvm/test/CodeGen/MIR/X86/undefined-value-in-memory-operand.mir
    llvm/test/CodeGen/MIR/X86/unknown-machine-basic-block.mir
    llvm/test/CodeGen/MIR/X86/unknown-metadata-keyword.mir
    llvm/test/CodeGen/MIR/X86/unknown-metadata-node.mir
    llvm/test/CodeGen/MIR/X86/unknown-named-machine-basic-block.mir
    llvm/test/CodeGen/MIR/X86/variable-sized-stack-object-size-error.mir
    llvm/test/CodeGen/MIR/X86/variable-sized-stack-objects.mir
    llvm/test/CodeGen/MSP430/2009-05-10-CyclicDAG.ll
    llvm/test/CodeGen/MSP430/2009-05-17-Rot.ll
    llvm/test/CodeGen/MSP430/2009-05-17-Shift.ll
    llvm/test/CodeGen/MSP430/2009-08-25-DynamicStackAlloc.ll
    llvm/test/CodeGen/MSP430/2009-09-18-AbsoluteAddr.ll
    llvm/test/CodeGen/MSP430/2009-10-10-OrImpDef.ll
    llvm/test/CodeGen/MSP430/2009-11-08-InvalidResNo.ll
    llvm/test/CodeGen/MSP430/2009-12-21-FrameAddr.ll
    llvm/test/CodeGen/MSP430/2009-12-22-InlineAsm.ll
    llvm/test/CodeGen/MSP430/2010-05-01-CombinerAnd.ll
    llvm/test/CodeGen/MSP430/AddrMode-bis-rx.ll
    llvm/test/CodeGen/MSP430/AddrMode-bis-xr.ll
    llvm/test/CodeGen/MSP430/AddrMode-mov-rx.ll
    llvm/test/CodeGen/MSP430/AddrMode-mov-xr.ll
    llvm/test/CodeGen/MSP430/BranchSelector.ll
    llvm/test/CodeGen/MSP430/Inst16mi.ll
    llvm/test/CodeGen/MSP430/Inst16mm.ll
    llvm/test/CodeGen/MSP430/Inst16mr.ll
    llvm/test/CodeGen/MSP430/Inst16rm.ll
    llvm/test/CodeGen/MSP430/Inst8mi.ll
    llvm/test/CodeGen/MSP430/Inst8mm.ll
    llvm/test/CodeGen/MSP430/Inst8mr.ll
    llvm/test/CodeGen/MSP430/Inst8rm.ll
    llvm/test/CodeGen/MSP430/InstII.ll
    llvm/test/CodeGen/MSP430/bit.ll
    llvm/test/CodeGen/MSP430/byval.ll
    llvm/test/CodeGen/MSP430/callee-saved.ll
    llvm/test/CodeGen/MSP430/calls.ll
    llvm/test/CodeGen/MSP430/cc_args.ll
    llvm/test/CodeGen/MSP430/cc_ret.ll
    llvm/test/CodeGen/MSP430/fp.ll
    llvm/test/CodeGen/MSP430/hwmult16.ll
    llvm/test/CodeGen/MSP430/hwmult32.ll
    llvm/test/CodeGen/MSP430/hwmultf5.ll
    llvm/test/CodeGen/MSP430/indirectbr.ll
    llvm/test/CodeGen/MSP430/indirectbr2.ll
    llvm/test/CodeGen/MSP430/inline-asm-absolute-addressing.ll
    llvm/test/CodeGen/MSP430/inline-asm.ll
    llvm/test/CodeGen/MSP430/inlineasm-output-template.ll
    llvm/test/CodeGen/MSP430/interrupt.ll
    llvm/test/CodeGen/MSP430/jumptable.ll
    llvm/test/CodeGen/MSP430/libcalls.ll
    llvm/test/CodeGen/MSP430/memset.ll
    llvm/test/CodeGen/MSP430/misched-msp430.ll
    llvm/test/CodeGen/MSP430/mult-alt-generic-msp430.ll
    llvm/test/CodeGen/MSP430/postinc.ll
    llvm/test/CodeGen/MSP430/promote-i8-mul.ll
    llvm/test/CodeGen/MSP430/spill-to-stack.ll
    llvm/test/CodeGen/MSP430/stacksave_restore.ll
    llvm/test/CodeGen/MSP430/struct-return.ll
    llvm/test/CodeGen/MSP430/struct_layout.ll
    llvm/test/CodeGen/MSP430/transient-stack-alignment.ll
    llvm/test/CodeGen/MSP430/vararg.ll
    llvm/test/CodeGen/Mips/GlobalISel/instruction-select/var_arg.mir
    llvm/test/CodeGen/Mips/GlobalISel/irtranslator/sret_pointer.ll
    llvm/test/CodeGen/Mips/GlobalISel/legalizer/store_split_because_of_memsize_or_align.mir
    llvm/test/CodeGen/Mips/GlobalISel/legalizer/var_arg.mir
    llvm/test/CodeGen/Mips/GlobalISel/mips-prelegalizer-combiner/inline-memcpy.mir
    llvm/test/CodeGen/Mips/GlobalISel/regbankselect/var_arg.mir
    llvm/test/CodeGen/Mips/hf16call32.ll
    llvm/test/CodeGen/Mips/hfptrcall.ll
    llvm/test/CodeGen/Mips/mips16_fpret.ll
    llvm/test/CodeGen/Mips/msa/emergency-spill.mir
    llvm/test/CodeGen/Mips/mulull.ll
    llvm/test/CodeGen/NVPTX/addrspacecast.ll
    llvm/test/CodeGen/NVPTX/bf16x2-instructions.ll
    llvm/test/CodeGen/NVPTX/ld-addrspace.ll
    llvm/test/CodeGen/NVPTX/ldu-ldg.ll
    llvm/test/CodeGen/NVPTX/noreturn.ll
    llvm/test/CodeGen/NVPTX/nvvm-reflect-ocl.ll
    llvm/test/CodeGen/NVPTX/nvvm-reflect-opaque.ll
    llvm/test/CodeGen/NVPTX/short-ptr.ll
    llvm/test/CodeGen/NVPTX/st-addrspace.ll
    llvm/test/CodeGen/PowerPC/2007-11-04-CoalescerCrash.ll
    llvm/test/CodeGen/PowerPC/aix-alias-alignment-2.ll
    llvm/test/CodeGen/PowerPC/aix-alias-alignment.ll
    llvm/test/CodeGen/PowerPC/aix-complex.ll
    llvm/test/CodeGen/PowerPC/aix-tls-gd-target-flags.ll
    llvm/test/CodeGen/PowerPC/block-placement.mir
    llvm/test/CodeGen/PowerPC/convert-rr-to-ri-instrs-R0-special-handling.mir
    llvm/test/CodeGen/PowerPC/convert-rr-to-ri-instrs.mir
    llvm/test/CodeGen/PowerPC/expand-foldable-isel.ll
    llvm/test/CodeGen/PowerPC/fast-isel-branch.ll
    llvm/test/CodeGen/PowerPC/lsr-insns-cost.ll
    llvm/test/CodeGen/PowerPC/ppc-TOC-stats.ll
    llvm/test/CodeGen/PowerPC/ppc32-selectcc-i64.ll
    llvm/test/CodeGen/PowerPC/preincprep-i64-check.ll
    llvm/test/CodeGen/PowerPC/preincprep-nontrans-crash.ll
    llvm/test/CodeGen/PowerPC/sink-down-more-instructions-1.mir
    llvm/test/CodeGen/PowerPC/sink-down-more-instructions-regpressure-high.mir
    llvm/test/CodeGen/PowerPC/sms-phi-1.ll
    llvm/test/CodeGen/PowerPC/sms-phi-3.ll
    llvm/test/CodeGen/PowerPC/stack-coloring-vararg.mir
    llvm/test/CodeGen/PowerPC/toc-load-sched-bug.ll
    llvm/test/CodeGen/PowerPC/vsx-infl-copy1.ll
    llvm/test/CodeGen/PowerPC/vsx-infl-copy2.ll
    llvm/test/CodeGen/RISCV/calling-conv-ilp32e.ll
    llvm/test/CodeGen/RISCV/copy-frameindex.mir
    llvm/test/CodeGen/RISCV/copyprop.ll
    llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll
    llvm/test/CodeGen/RISCV/fli-licm.ll
    llvm/test/CodeGen/RISCV/live-sp.mir
    llvm/test/CodeGen/RISCV/make-compressible-rv64.mir
    llvm/test/CodeGen/RISCV/make-compressible.mir
    llvm/test/CodeGen/RISCV/misched-load-clustering.ll
    llvm/test/CodeGen/RISCV/out-of-reach-emergency-slot.mir
    llvm/test/CodeGen/RISCV/prefetch.ll
    llvm/test/CodeGen/RISCV/push-pop-popret.ll
    llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
    llvm/test/CodeGen/RISCV/rv64-legal-i32/xtheadmemidx.ll
    llvm/test/CodeGen/RISCV/rv64-patchpoint.ll
    llvm/test/CodeGen/RISCV/rv64-stackmap-frame-setup.ll
    llvm/test/CodeGen/RISCV/rv64-stackmap.ll
    llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll
    llvm/test/CodeGen/RISCV/rvv/addi-rvv-stack-object.mir
    llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir
    llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll
    llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
    llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll
    llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll
    llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
    llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-emergency-slot.mir
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
    llvm/test/CodeGen/RISCV/rvv/frameindex-addr.ll
    llvm/test/CodeGen/RISCV/rvv/implicit-def-copy.ll
    llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll
    llvm/test/CodeGen/RISCV/rvv/legalize-load-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/legalize-store-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/load-mask.ll
    llvm/test/CodeGen/RISCV/rvv/localvar.ll
    llvm/test/CodeGen/RISCV/rvv/masked-load-fp.ll
    llvm/test/CodeGen/RISCV/rvv/masked-load-int.ll
    llvm/test/CodeGen/RISCV/rvv/masked-store-fp.ll
    llvm/test/CodeGen/RISCV/rvv/masked-store-int.ll
    llvm/test/CodeGen/RISCV/rvv/masked-tama.ll
    llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll
    llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll
    llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll
    llvm/test/CodeGen/RISCV/rvv/memory-args.ll
    llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/narrow-shift-extend.ll
    llvm/test/CodeGen/RISCV/rvv/rvv-framelayout.ll
    llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll
    llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-masked-vops.ll
    llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll
    llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
    llvm/test/CodeGen/RISCV/rvv/rvv-stack-align.mir
    llvm/test/CodeGen/RISCV/rvv/scalable-vector-struct.ll
    llvm/test/CodeGen/RISCV/rvv/scalar-stack-align.ll
    llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
    llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll
    llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir
    llvm/test/CodeGen/RISCV/rvv/unaligned-loads-stores.ll
    llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll
    llvm/test/CodeGen/RISCV/rvv/vle.ll
    llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll
    llvm/test/CodeGen/RISCV/rvv/vleff.ll
    llvm/test/CodeGen/RISCV/rvv/vlm.ll
    llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vloxei.ll
    llvm/test/CodeGen/RISCV/rvv/vlse.ll
    llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vluxei.ll
    llvm/test/CodeGen/RISCV/rvv/vpload.ll
    llvm/test/CodeGen/RISCV/rvv/vpstore.ll
    llvm/test/CodeGen/RISCV/rvv/vse.ll
    llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll
    llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
    llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
    llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
    llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
    llvm/test/CodeGen/RISCV/rvv/vsetvli-intrinsics.ll
    llvm/test/CodeGen/RISCV/rvv/vsetvli-regression.ll
    llvm/test/CodeGen/RISCV/rvv/vsm.ll
    llvm/test/CodeGen/RISCV/rvv/vsoxei-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsoxei.ll
    llvm/test/CodeGen/RISCV/rvv/vsse.ll
    llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsuxei.ll
    llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll
    llvm/test/CodeGen/RISCV/rvv/wrong-stack-offset-for-rvv-object.mir
    llvm/test/CodeGen/RISCV/rvv/zve32-types.ll
    llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir
    llvm/test/CodeGen/RISCV/stack-realignment.ll
    llvm/test/CodeGen/RISCV/vararg-ilp32e.ll
    llvm/test/CodeGen/RISCV/xtheadfmemidx.ll
    llvm/test/CodeGen/RISCV/xtheadmemidx.ll
    llvm/test/CodeGen/RISCV/xtheadmempair.ll
    llvm/test/CodeGen/SPARC/2008-10-10-InlineAsmMemoryOperand.ll
    llvm/test/CodeGen/SPARC/2008-10-10-InlineAsmRegOperand.ll
    llvm/test/CodeGen/SPARC/2009-08-28-PIC.ll
    llvm/test/CodeGen/SPARC/2011-01-11-CC.ll
    llvm/test/CodeGen/SPARC/2011-01-11-FrameAddr.ll
    llvm/test/CodeGen/SPARC/2011-01-19-DelaySlot.ll
    llvm/test/CodeGen/SPARC/2011-01-21-ByValArgs.ll
    llvm/test/CodeGen/SPARC/2011-01-22-SRet.ll
    llvm/test/CodeGen/SPARC/2011-12-03-TailDuplication.ll
    llvm/test/CodeGen/SPARC/2012-05-01-LowerArguments.ll
    llvm/test/CodeGen/SPARC/2013-05-17-CallFrame.ll
    llvm/test/CodeGen/SPARC/32abi.ll
    llvm/test/CodeGen/SPARC/64abi.ll
    llvm/test/CodeGen/SPARC/64atomics.ll
    llvm/test/CodeGen/SPARC/64bit.ll
    llvm/test/CodeGen/SPARC/64cond.ll
    llvm/test/CodeGen/SPARC/LeonCASAInstructionUT.ll
    llvm/test/CodeGen/SPARC/LeonFixAllFDIVSQRTPassUT.ll
    llvm/test/CodeGen/SPARC/LeonInsertNOPLoadPassUT.ll
    llvm/test/CodeGen/SPARC/LeonItinerariesUT.ll
    llvm/test/CodeGen/SPARC/LeonSMACUMACInstructionUT.ll
    llvm/test/CodeGen/SPARC/atomics.ll
    llvm/test/CodeGen/SPARC/basictest.ll
    llvm/test/CodeGen/SPARC/bigreturn.ll
    llvm/test/CodeGen/SPARC/blockaddr.ll
    llvm/test/CodeGen/SPARC/cast-sret-func.ll
    llvm/test/CodeGen/SPARC/constructor.ll
    llvm/test/CodeGen/SPARC/exception.ll
    llvm/test/CodeGen/SPARC/fail-alloca-align.ll
    llvm/test/CodeGen/SPARC/float.ll
    llvm/test/CodeGen/SPARC/fp128.ll
    llvm/test/CodeGen/SPARC/fp16-promote.ll
    llvm/test/CodeGen/SPARC/func-addr.ll
    llvm/test/CodeGen/SPARC/globals.ll
    llvm/test/CodeGen/SPARC/inlineasm-output-template.ll
    llvm/test/CodeGen/SPARC/inlineasm-v9.ll
    llvm/test/CodeGen/SPARC/inlineasm.ll
    llvm/test/CodeGen/SPARC/leafproc.ll
    llvm/test/CodeGen/SPARC/missing-sret.ll
    llvm/test/CodeGen/SPARC/mult-alt-generic-sparc.ll
    llvm/test/CodeGen/SPARC/obj-relocs.ll
    llvm/test/CodeGen/SPARC/overflow-intrinsic-optimizations.ll
    llvm/test/CodeGen/SPARC/pic.ll
    llvm/test/CodeGen/SPARC/private.ll
    llvm/test/CodeGen/SPARC/reserved-regs.ll
    llvm/test/CodeGen/SPARC/select-mask.ll
    llvm/test/CodeGen/SPARC/setjmp.ll
    llvm/test/CodeGen/SPARC/spillsize.ll
    llvm/test/CodeGen/SPARC/sret-secondary.ll
    llvm/test/CodeGen/SPARC/stack-align.ll
    llvm/test/CodeGen/SPARC/stack-protector.ll
    llvm/test/CodeGen/SPARC/tailcall.ll
    llvm/test/CodeGen/SPARC/thread-pointer.ll
    llvm/test/CodeGen/SPARC/tls.ll
    llvm/test/CodeGen/SPARC/varargs-v8.ll
    llvm/test/CodeGen/SPARC/varargs.ll
    llvm/test/CodeGen/SPARC/vector-extract-elt.ll
    llvm/test/CodeGen/SPARC/zerostructcall.ll
    llvm/test/CodeGen/SystemZ/Large/branch-01.ll
    llvm/test/CodeGen/SystemZ/RAbasic-invalid-LR-update.mir
    llvm/test/CodeGen/SystemZ/clear-liverange-spillreg.mir
    llvm/test/CodeGen/SystemZ/cond-move-04.mir
    llvm/test/CodeGen/SystemZ/cond-move-05.mir
    llvm/test/CodeGen/SystemZ/cond-move-08.mir
    llvm/test/CodeGen/SystemZ/cond-move-regalloc-hints-02.mir
    llvm/test/CodeGen/SystemZ/cond-move-regalloc-hints.mir
    llvm/test/CodeGen/SystemZ/dag-combine-02.ll
    llvm/test/CodeGen/SystemZ/debuginstr-00.mir
    llvm/test/CodeGen/SystemZ/debuginstr-01.mir
    llvm/test/CodeGen/SystemZ/debuginstr-cgp.mir
    llvm/test/CodeGen/SystemZ/foldmemop-imm-02.mir
    llvm/test/CodeGen/SystemZ/foldmemop-msc.mir
    llvm/test/CodeGen/SystemZ/foldmemop-vec-binops.mir
    llvm/test/CodeGen/SystemZ/foldmemop-vec-cc.mir
    llvm/test/CodeGen/SystemZ/foldmemop-vec-cmp.mir
    llvm/test/CodeGen/SystemZ/foldmemop-vec-fusedfp.mir
    llvm/test/CodeGen/SystemZ/fp-conv-17.mir
    llvm/test/CodeGen/SystemZ/frame-26.mir
    llvm/test/CodeGen/SystemZ/int-cmp-56.mir
    llvm/test/CodeGen/SystemZ/isel-debug.ll
    llvm/test/CodeGen/SystemZ/load-and-test-RA-hints.mir
    llvm/test/CodeGen/SystemZ/loop-04.ll
    llvm/test/CodeGen/SystemZ/multiselect-02.mir
    llvm/test/CodeGen/SystemZ/postra-sched-expandedops.mir
    llvm/test/CodeGen/SystemZ/regalloc-GR128-02.mir
    llvm/test/CodeGen/SystemZ/selectcc-04.ll
    llvm/test/CodeGen/SystemZ/subregliveness-06.mir
    llvm/test/CodeGen/SystemZ/zos-landingpad.ll
    llvm/test/CodeGen/VE/Scalar/pic_access_data.ll
    llvm/test/CodeGen/VE/Scalar/pic_indirect_func_call.ll
    llvm/test/CodeGen/WebAssembly/cfg-stackify.ll
    llvm/test/CodeGen/WebAssembly/global.ll
    llvm/test/CodeGen/WebAssembly/userstack.ll
    llvm/test/CodeGen/WinCFGuard/cfguard-cast.ll
    llvm/test/CodeGen/WinCFGuard/cfguard-giats.ll
    llvm/test/CodeGen/WinCFGuard/cfguard.ll
    llvm/test/CodeGen/XCore/threads.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/BPF/BTF/builtin-btf-type-id.ll b/llvm/test/CodeGen/BPF/BTF/builtin-btf-type-id.ll
index 7b184d5b2810c..2fb8d25a2e07b 100644
--- a/llvm/test/CodeGen/BPF/BTF/builtin-btf-type-id.ll
+++ b/llvm/test/CodeGen/BPF/BTF/builtin-btf-type-id.ll
@@ -28,7 +28,7 @@ define dso_local void @prog1() #0 !dbg !28 {
 entry:
   %0 = load ptr, ptr @bpf_log, align 8, !dbg !31, !tbaa !32
   %1 = call i64 @llvm.bpf.btf.type.id(i32 0, i64 0), !dbg !36, !llvm.preserve.access.index !7
-  %call = call i32 %0(i64 %1, ptr getelementptr inbounds ({ <{ i8, i8, [98 x i8] }>, i32 }, ptr @tmp__abc, i32 0, i32 0, i32 0), i32 104), !dbg !31
+  %call = call i32 %0(i64 %1, ptr @tmp__abc, i32 104), !dbg !31
   ret void, !dbg !37
 }
 
@@ -40,7 +40,7 @@ define dso_local void @prog2() #0 !dbg !38 {
 entry:
   %0 = load ptr, ptr @bpf_log, align 8, !dbg !39, !tbaa !32
   %1 = call i64 @llvm.bpf.btf.type.id(i32 1, i64 0), !dbg !40, !llvm.preserve.access.index !6
-  %call = call i32 %0(i64 %1, ptr getelementptr inbounds ({ <{ i8, i8, [98 x i8] }>, i32 }, ptr @tmp__abc, i32 0, i32 0, i32 0), i32 104), !dbg !39
+  %call = call i32 %0(i64 %1, ptr @tmp__abc, i32 104), !dbg !39
   ret void, !dbg !41
 }
 
@@ -49,7 +49,7 @@ define dso_local void @prog3() #0 !dbg !42 {
 entry:
   %0 = load ptr, ptr @bpf_log, align 8, !dbg !43, !tbaa !32
   %1 = call i64 @llvm.bpf.btf.type.id(i32 2, i64 1), !dbg !44, !llvm.preserve.access.index !11
-  %call = call i32 %0(i64 %1, ptr getelementptr inbounds ({ <{ i8, i8, [98 x i8] }>, i32 }, ptr @tmp__abc, i32 0, i32 0, i32 0), i32 104), !dbg !43
+  %call = call i32 %0(i64 %1, ptr @tmp__abc, i32 104), !dbg !43
   ret void, !dbg !45
 }
 

diff --git a/llvm/test/CodeGen/BPF/BTF/static-var-zerolen-array.ll b/llvm/test/CodeGen/BPF/BTF/static-var-zerolen-array.ll
index 38f7627f10e89..1ee1a174e6a5d 100644
--- a/llvm/test/CodeGen/BPF/BTF/static-var-zerolen-array.ll
+++ b/llvm/test/CodeGen/BPF/BTF/static-var-zerolen-array.ll
@@ -16,7 +16,7 @@
 
 ; Function Attrs: norecurse nounwind
 define dso_local i32 @test() local_unnamed_addr #0 !dbg !21 {
-  %1 = load volatile i32, ptr getelementptr inbounds ({ i32, i32, [10 x i8] }, ptr @sv, i64 0, i32 0), align 4, !dbg !24, !tbaa !25
+  %1 = load volatile i32, ptr @sv, align 4, !dbg !24, !tbaa !25
   ret i32 %1, !dbg !29
 }
 

diff --git a/llvm/test/CodeGen/BPF/BTF/type-tag-fixup-fwd.ll b/llvm/test/CodeGen/BPF/BTF/type-tag-fixup-fwd.ll
index 90ee124634c26..b03d23383a0ed 100644
--- a/llvm/test/CodeGen/BPF/BTF/type-tag-fixup-fwd.ll
+++ b/llvm/test/CodeGen/BPF/BTF/type-tag-fixup-fwd.ll
@@ -18,20 +18,20 @@
 ; Compilation flag:
 ;   clang -target bpf -O2 -g -S -emit-llvm test.c
 
-%struct.map_value = type { %struct.foo* }
+%struct.map_value = type { ptr }
 %struct.foo = type opaque
 
 ; Function Attrs: nounwind
 define dso_local void @test() local_unnamed_addr #0 !dbg !7 {
 entry:
   %v = alloca %struct.map_value, align 8
-  %0 = bitcast %struct.map_value* %v to i8*, !dbg !20
-  call void @llvm.lifetime.start.p0i8(i64 8, i8* nonnull %0) #4, !dbg !20
-  call void @llvm.dbg.declare(metadata %struct.map_value* %v, metadata !11, metadata !DIExpression()), !dbg !21
-  %1 = bitcast %struct.map_value* %v to i64*, !dbg !21
-  store i64 0, i64* %1, align 8, !dbg !21
-  call void @func(%struct.map_value* noundef nonnull %v) #4, !dbg !22
-  call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %0) #4, !dbg !23
+  %0 = bitcast ptr %v to ptr, !dbg !20
+  call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %0) #4, !dbg !20
+  call void @llvm.dbg.declare(metadata ptr %v, metadata !11, metadata !DIExpression()), !dbg !21
+  %1 = bitcast ptr %v to ptr, !dbg !21
+  store i64 0, ptr %1, align 8, !dbg !21
+  call void @func(ptr noundef nonnull %v) #4, !dbg !22
+  call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %0) #4, !dbg !23
   ret void, !dbg !23
 }
 
@@ -80,15 +80,15 @@ entry:
 ; CHECK:             .ascii  "func"                          # string offset=90
 
 ; Function Attrs: argmemonly mustprogress nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
 
 ; Function Attrs: mustprogress nofree nosync nounwind readnone speculatable willreturn
 declare void @llvm.dbg.declare(metadata, metadata, metadata) #2
 
-declare !dbg !24 dso_local void @func(%struct.map_value* noundef) local_unnamed_addr #3
+declare !dbg !24 dso_local void @func(ptr noundef) local_unnamed_addr #3
 
 ; Function Attrs: argmemonly mustprogress nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
 
 attributes #0 = { nounwind "frame-pointer"="all" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" }
 attributes #1 = { argmemonly mustprogress nofree nosync nounwind willreturn }

diff --git a/llvm/test/CodeGen/BPF/BTF/type-tag-fixup-resolved.ll b/llvm/test/CodeGen/BPF/BTF/type-tag-fixup-resolved.ll
index b12e2c78f04a0..21c74afeaa96e 100644
--- a/llvm/test/CodeGen/BPF/BTF/type-tag-fixup-resolved.ll
+++ b/llvm/test/CodeGen/BPF/BTF/type-tag-fixup-resolved.ll
@@ -20,20 +20,20 @@
 ; Compilation flag:
 ;   clang -target bpf -O2 -g -S -emit-llvm test.c
 
-%struct.map_value = type { %struct.foo* }
+%struct.map_value = type { ptr }
 %struct.foo = type { i32 }
 
 ; Function Attrs: nounwind
 define dso_local void @test() local_unnamed_addr #0 !dbg !7 {
 entry:
   %v = alloca %struct.map_value, align 8
-  %0 = bitcast %struct.map_value* %v to i8*, !dbg !23
-  call void @llvm.lifetime.start.p0i8(i64 8, i8* nonnull %0) #4, !dbg !23
-  call void @llvm.dbg.declare(metadata %struct.map_value* %v, metadata !11, metadata !DIExpression()), !dbg !24
-  %1 = bitcast %struct.map_value* %v to i64*, !dbg !24
-  store i64 0, i64* %1, align 8, !dbg !24
-  call void @func(%struct.map_value* noundef nonnull %v, %struct.foo* noundef null) #4, !dbg !25
-  call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %0) #4, !dbg !26
+  %0 = bitcast ptr %v to ptr, !dbg !23
+  call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %0) #4, !dbg !23
+  call void @llvm.dbg.declare(metadata ptr %v, metadata !11, metadata !DIExpression()), !dbg !24
+  %1 = bitcast ptr %v to ptr, !dbg !24
+  store i64 0, ptr %1, align 8, !dbg !24
+  call void @func(ptr noundef nonnull %v, ptr noundef null) #4, !dbg !25
+  call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %0) #4, !dbg !26
   ret void, !dbg !26
 }
 
@@ -96,15 +96,15 @@ entry:
 ; CHECK:             .ascii  "tag1"                          # string offset=97
 
 ; Function Attrs: argmemonly mustprogress nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
 
 ; Function Attrs: mustprogress nofree nosync nounwind readnone speculatable willreturn
 declare void @llvm.dbg.declare(metadata, metadata, metadata) #2
 
-declare !dbg !27 dso_local void @func(%struct.map_value* noundef, %struct.foo* noundef) local_unnamed_addr #3
+declare !dbg !27 dso_local void @func(ptr noundef, ptr noundef) local_unnamed_addr #3
 
 ; Function Attrs: argmemonly mustprogress nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
 
 attributes #0 = { nounwind "frame-pointer"="all" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" }
 attributes #1 = { argmemonly mustprogress nofree nosync nounwind willreturn }

diff --git a/llvm/test/CodeGen/BPF/CORE/offset-reloc-cast-array-1.ll b/llvm/test/CodeGen/BPF/CORE/offset-reloc-cast-array-1.ll
index 8a0a461698c7c..e12221ecb3528 100644
--- a/llvm/test/CodeGen/BPF/CORE/offset-reloc-cast-array-1.ll
+++ b/llvm/test/CodeGen/BPF/CORE/offset-reloc-cast-array-1.ll
@@ -21,16 +21,16 @@ target triple = "bpf"
 %struct.v1 = type { i32, i32 }
 
 ; Function Attrs: nounwind
-define dso_local i32 @test(%struct.v3* %arg) local_unnamed_addr #0 !dbg !22 {
+define dso_local i32 @test(ptr %arg) local_unnamed_addr #0 !dbg !22 {
 entry:
-  call void @llvm.dbg.value(metadata %struct.v3* %arg, metadata !32, metadata !DIExpression()), !dbg !33
-  %0 = tail call [100 x i32]* @llvm.preserve.struct.access.index.p0a100i32.p0s_struct.v3s(%struct.v3* elementtype(%struct.v3) %arg, i32 1, i32 1), !dbg !34, !llvm.preserve.access.index !26
-  %1 = tail call i32* @llvm.preserve.array.access.index.p0i32.p0a100i32([100 x i32]* elementtype([100 x i32]) %0, i32 1, i32 0), !dbg !34, !llvm.preserve.access.index !15
-  %2 = bitcast i32* %1 to [4 x %struct.v1]*, !dbg !34
-  %3 = tail call [4 x %struct.v1]* @llvm.preserve.array.access.index.p0a4s_struct.v1s.p0a4s_struct.v1s([4 x %struct.v1]* elementtype([4 x %struct.v1]) %2, i32 0, i32 0), !dbg !34, !llvm.preserve.access.index !4
-  %4 = tail call %struct.v1* @llvm.preserve.array.access.index.p0s_struct.v1s.p0a4s_struct.v1s([4 x %struct.v1]* elementtype([4 x %struct.v1]) %3, i32 1, i32 2), !dbg !34, !llvm.preserve.access.index !5
-  %5 = tail call i32* @llvm.preserve.struct.access.index.p0i32.p0s_struct.v1s(%struct.v1* elementtype(%struct.v1) %4, i32 1, i32 1), !dbg !34, !llvm.preserve.access.index !8
-  %call = tail call i32 @get_value(i32* %5) #4, !dbg !35
+  call void @llvm.dbg.value(metadata ptr %arg, metadata !32, metadata !DIExpression()), !dbg !33
+  %0 = tail call ptr @llvm.preserve.struct.access.index.p0.p0(ptr elementtype(%struct.v3) %arg, i32 1, i32 1), !dbg !34, !llvm.preserve.access.index !26
+  %1 = tail call ptr @llvm.preserve.array.access.index.p0.p0(ptr elementtype([100 x i32]) %0, i32 1, i32 0), !dbg !34, !llvm.preserve.access.index !15
+  %2 = bitcast ptr %1 to ptr, !dbg !34
+  %3 = tail call ptr @llvm.preserve.array.access.index.p0.p0(ptr elementtype([4 x %struct.v1]) %2, i32 0, i32 0), !dbg !34, !llvm.preserve.access.index !4
+  %4 = tail call ptr @llvm.preserve.array.access.index.p0.p0(ptr elementtype([4 x %struct.v1]) %3, i32 1, i32 2), !dbg !34, !llvm.preserve.access.index !5
+  %5 = tail call ptr @llvm.preserve.struct.access.index.p0.p0(ptr elementtype(%struct.v1) %4, i32 1, i32 1), !dbg !34, !llvm.preserve.access.index !8
+  %call = tail call i32 @get_value(ptr %5) #4, !dbg !35
   ret i32 %call, !dbg !36
 }
 
@@ -60,22 +60,19 @@ entry:
 ; CHECK-NEXT:         .long   107
 ; CHECK-NEXT:         .long   0
 
-declare dso_local i32 @get_value(i32*) local_unnamed_addr #1
+declare dso_local i32 @get_value(ptr) local_unnamed_addr #1
 
 ; Function Attrs: nounwind readnone
-declare [100 x i32]* @llvm.preserve.struct.access.index.p0a100i32.p0s_struct.v3s(%struct.v3*, i32, i32) #2
+declare ptr @llvm.preserve.struct.access.index.p0.p0(ptr, i32, i32) #2
 
 ; Function Attrs: nounwind readnone
-declare i32* @llvm.preserve.array.access.index.p0i32.p0a100i32([100 x i32]*, i32, i32) #2
+declare ptr @llvm.preserve.array.access.index.p0.p0(ptr, i32, i32) #2
 
 ; Function Attrs: nounwind readnone
-declare [4 x %struct.v1]* @llvm.preserve.array.access.index.p0a4s_struct.v1s.p0a4s_struct.v1s([4 x %struct.v1]*, i32, i32) #2
 
 ; Function Attrs: nounwind readnone
-declare %struct.v1* @llvm.preserve.array.access.index.p0s_struct.v1s.p0a4s_struct.v1s([4 x %struct.v1]*, i32, i32) #2
 
 ; Function Attrs: nounwind readnone
-declare i32* @llvm.preserve.struct.access.index.p0i32.p0s_struct.v1s(%struct.v1*, i32, i32) #2
 
 ; Function Attrs: nounwind readnone speculatable willreturn
 declare void @llvm.dbg.value(metadata, metadata, metadata) #3

diff --git a/llvm/test/CodeGen/BPF/CORE/offset-reloc-cast-array-2.ll b/llvm/test/CodeGen/BPF/CORE/offset-reloc-cast-array-2.ll
index e3b053f17251a..1764c9d12a36b 100644
--- a/llvm/test/CodeGen/BPF/CORE/offset-reloc-cast-array-2.ll
+++ b/llvm/test/CodeGen/BPF/CORE/offset-reloc-cast-array-2.ll
@@ -21,17 +21,17 @@ target triple = "bpf"
 %struct.v1 = type { i32, i32 }
 
 ; Function Attrs: nounwind
-define dso_local i32 @test(%struct.v3* %arg) local_unnamed_addr #0 !dbg !24 {
+define dso_local i32 @test(ptr %arg) local_unnamed_addr #0 !dbg !24 {
 entry:
-  call void @llvm.dbg.value(metadata %struct.v3* %arg, metadata !34, metadata !DIExpression()), !dbg !35
-  %0 = tail call [100 x i32]* @llvm.preserve.struct.access.index.p0a100i32.p0s_struct.v3s(%struct.v3* elementtype(%struct.v3) %arg, i32 1, i32 1), !dbg !36, !llvm.preserve.access.index !28
-  %1 = tail call i32* @llvm.preserve.array.access.index.p0i32.p0a100i32([100 x i32]* elementtype([100 x i32]) %0, i32 1, i32 0), !dbg !36, !llvm.preserve.access.index !15
-  %2 = bitcast i32* %1 to [4 x [4 x %struct.v1]]*, !dbg !36
-  %3 = tail call [4 x [4 x %struct.v1]]* @llvm.preserve.array.access.index.p0a4a4s_struct.v1s.p0a4a4s_struct.v1s([4 x [4 x %struct.v1]]* elementtype([4 x [4 x %struct.v1]]) %2, i32 0, i32 0), !dbg !36, !llvm.preserve.access.index !4
-  %4 = tail call [4 x %struct.v1]* @llvm.preserve.array.access.index.p0a4s_struct.v1s.p0a4a4s_struct.v1s([4 x [4 x %struct.v1]]* elementtype([4 x [4 x %struct.v1]]) %3, i32 1, i32 2), !dbg !36, !llvm.preserve.access.index !5
-  %5 = tail call %struct.v1* @llvm.preserve.array.access.index.p0s_struct.v1s.p0a4s_struct.v1s([4 x %struct.v1]* elementtype([4 x %struct.v1]) %4, i32 1, i32 3), !dbg !36, !llvm.preserve.access.index !18
-  %6 = tail call i32* @llvm.preserve.struct.access.index.p0i32.p0s_struct.v1s(%struct.v1* elementtype(%struct.v1) %5, i32 1, i32 1), !dbg !36, !llvm.preserve.access.index !8
-  %call = tail call i32 @get_value(i32* %6) #4, !dbg !37
+  call void @llvm.dbg.value(metadata ptr %arg, metadata !34, metadata !DIExpression()), !dbg !35
+  %0 = tail call ptr @llvm.preserve.struct.access.index.p0.p0(ptr elementtype(%struct.v3) %arg, i32 1, i32 1), !dbg !36, !llvm.preserve.access.index !28
+  %1 = tail call ptr @llvm.preserve.array.access.index.p0.p0(ptr elementtype([100 x i32]) %0, i32 1, i32 0), !dbg !36, !llvm.preserve.access.index !15
+  %2 = bitcast ptr %1 to ptr, !dbg !36
+  %3 = tail call ptr @llvm.preserve.array.access.index.p0.p0(ptr elementtype([4 x [4 x %struct.v1]]) %2, i32 0, i32 0), !dbg !36, !llvm.preserve.access.index !4
+  %4 = tail call ptr @llvm.preserve.array.access.index.p0.p0(ptr elementtype([4 x [4 x %struct.v1]]) %3, i32 1, i32 2), !dbg !36, !llvm.preserve.access.index !5
+  %5 = tail call ptr @llvm.preserve.array.access.index.p0.p0(ptr elementtype([4 x %struct.v1]) %4, i32 1, i32 3), !dbg !36, !llvm.preserve.access.index !18
+  %6 = tail call ptr @llvm.preserve.struct.access.index.p0.p0(ptr elementtype(%struct.v1) %5, i32 1, i32 1), !dbg !36, !llvm.preserve.access.index !8
+  %call = tail call i32 @get_value(ptr %6) #4, !dbg !37
   ret i32 %call, !dbg !38
 }
 
@@ -62,25 +62,21 @@ entry:
 ; CHECK-NEXT:         .long   107
 ; CHECK-NEXT:         .long   0
 
-declare dso_local i32 @get_value(i32*) local_unnamed_addr #1
+declare dso_local i32 @get_value(ptr) local_unnamed_addr #1
 
 ; Function Attrs: nounwind readnone
-declare [100 x i32]* @llvm.preserve.struct.access.index.p0a100i32.p0s_struct.v3s(%struct.v3*, i32, i32) #2
+declare ptr @llvm.preserve.struct.access.index.p0.p0(ptr, i32, i32) #2
 
 ; Function Attrs: nounwind readnone
-declare i32* @llvm.preserve.array.access.index.p0i32.p0a100i32([100 x i32]*, i32, i32) #2
+declare ptr @llvm.preserve.array.access.index.p0.p0(ptr, i32, i32) #2
 
 ; Function Attrs: nounwind readnone
-declare [4 x [4 x %struct.v1]]* @llvm.preserve.array.access.index.p0a4a4s_struct.v1s.p0a4a4s_struct.v1s([4 x [4 x %struct.v1]]*, i32, i32) #2
 
 ; Function Attrs: nounwind readnone
-declare [4 x %struct.v1]* @llvm.preserve.array.access.index.p0a4s_struct.v1s.p0a4a4s_struct.v1s([4 x [4 x %struct.v1]]*, i32, i32) #2
 
 ; Function Attrs: nounwind readnone
-declare %struct.v1* @llvm.preserve.array.access.index.p0s_struct.v1s.p0a4s_struct.v1s([4 x %struct.v1]*, i32, i32) #2
 
 ; Function Attrs: nounwind readnone
-declare i32* @llvm.preserve.struct.access.index.p0i32.p0s_struct.v1s(%struct.v1*, i32, i32) #2
 
 ; Function Attrs: nounwind readnone speculatable willreturn
 declare void @llvm.dbg.value(metadata, metadata, metadata) #3

diff --git a/llvm/test/CodeGen/BPF/CORE/offset-reloc-cast-struct-3.ll b/llvm/test/CodeGen/BPF/CORE/offset-reloc-cast-struct-3.ll
index 952183c739384..dea6e406678f0 100644
--- a/llvm/test/CodeGen/BPF/CORE/offset-reloc-cast-struct-3.ll
+++ b/llvm/test/CodeGen/BPF/CORE/offset-reloc-cast-struct-3.ll
@@ -22,14 +22,14 @@ target triple = "bpf"
 %struct.v1 = type { i32, i32 }
 
 ; Function Attrs: nounwind
-define dso_local i32 @test(%struct.v3* %arg) local_unnamed_addr #0 !dbg !19 {
+define dso_local i32 @test(ptr %arg) local_unnamed_addr #0 !dbg !19 {
 entry:
-  call void @llvm.dbg.value(metadata %struct.v3* %arg, metadata !30, metadata !DIExpression()), !dbg !31
-  %0 = tail call [40 x i32]* @llvm.preserve.struct.access.index.p0a40i32.p0s_struct.v3s(%struct.v3* elementtype(%struct.v3) %arg, i32 1, i32 1), !dbg !32, !llvm.preserve.access.index !24
-  %1 = tail call i32* @llvm.preserve.array.access.index.p0i32.p0a40i32([40 x i32]* elementtype([40 x i32]) %0, i32 1, i32 4), !dbg !32, !llvm.preserve.access.index !11
-  %2 = bitcast i32* %1 to %struct.v1*, !dbg !32
-  %3 = tail call i32* @llvm.preserve.struct.access.index.p0i32.p0s_struct.v1s(%struct.v1* elementtype(%struct.v1) %2, i32 1, i32 1), !dbg !32, !llvm.preserve.access.index !6
-  %call = tail call i32 @get_value(i32* %3) #4, !dbg !33
+  call void @llvm.dbg.value(metadata ptr %arg, metadata !30, metadata !DIExpression()), !dbg !31
+  %0 = tail call ptr @llvm.preserve.struct.access.index.p0.p0(ptr elementtype(%struct.v3) %arg, i32 1, i32 1), !dbg !32, !llvm.preserve.access.index !24
+  %1 = tail call ptr @llvm.preserve.array.access.index.p0.p0(ptr elementtype([40 x i32]) %0, i32 1, i32 4), !dbg !32, !llvm.preserve.access.index !11
+  %2 = bitcast ptr %1 to ptr, !dbg !32
+  %3 = tail call ptr @llvm.preserve.struct.access.index.p0.p0(ptr elementtype(%struct.v1) %2, i32 1, i32 1), !dbg !32, !llvm.preserve.access.index !6
+  %call = tail call i32 @get_value(ptr %3) #4, !dbg !33
   ret i32 %call, !dbg !34
 }
 
@@ -60,16 +60,15 @@ entry:
 ; CHECK-NEXT:        .long   118
 ; CHECK-NEXT:        .long   0
 
-declare dso_local i32 @get_value(i32*) local_unnamed_addr #1
+declare dso_local i32 @get_value(ptr) local_unnamed_addr #1
 
 ; Function Attrs: nounwind readnone
-declare [40 x i32]* @llvm.preserve.struct.access.index.p0a40i32.p0s_struct.v3s(%struct.v3*, i32, i32) #2
+declare ptr @llvm.preserve.struct.access.index.p0.p0(ptr, i32, i32) #2
 
 ; Function Attrs: nounwind readnone
-declare i32* @llvm.preserve.array.access.index.p0i32.p0a40i32([40 x i32]*, i32, i32) #2
+declare ptr @llvm.preserve.array.access.index.p0.p0(ptr, i32, i32) #2
 
 ; Function Attrs: nounwind readnone
-declare i32* @llvm.preserve.struct.access.index.p0i32.p0s_struct.v1s(%struct.v1*, i32, i32) #2
 
 ; Function Attrs: nounwind readnone speculatable willreturn
 declare void @llvm.dbg.value(metadata, metadata, metadata) #3

diff --git a/llvm/test/CodeGen/BPF/CORE/offset-reloc-cast-union-1.ll b/llvm/test/CodeGen/BPF/CORE/offset-reloc-cast-union-1.ll
index 4f80edb496003..98fdfded2fee4 100644
--- a/llvm/test/CodeGen/BPF/CORE/offset-reloc-cast-union-1.ll
+++ b/llvm/test/CodeGen/BPF/CORE/offset-reloc-cast-union-1.ll
@@ -24,14 +24,14 @@ target triple = "bpf"
 %union.v1 = type { i32 }
 
 ; Function Attrs: nounwind
-define dso_local i32 @test(%union.v3* %arg) local_unnamed_addr #0 !dbg !15 {
+define dso_local i32 @test(ptr %arg) local_unnamed_addr #0 !dbg !15 {
 entry:
-  call void @llvm.dbg.value(metadata %union.v3* %arg, metadata !33, metadata !DIExpression()), !dbg !34
-  %0 = tail call %union.v3* @llvm.preserve.union.access.index.p0s_union.v3s.p0s_union.v3s(%union.v3* %arg, i32 1), !dbg !35, !llvm.preserve.access.index !20
-  %1 = bitcast %union.v3* %0 to %union.v1*, !dbg !35
-  %2 = tail call %union.v1* @llvm.preserve.union.access.index.p0s_union.v1s.p0s_union.v1s(%union.v1* %1, i32 1), !dbg !35, !llvm.preserve.access.index !6
-  %b = getelementptr inbounds %union.v1, %union.v1* %2, i64 0, i32 0, !dbg !35
-  %call = tail call i32 @get_value(i32* %b) #4, !dbg !36
+  call void @llvm.dbg.value(metadata ptr %arg, metadata !33, metadata !DIExpression()), !dbg !34
+  %0 = tail call ptr @llvm.preserve.union.access.index.p0.p0(ptr %arg, i32 1), !dbg !35, !llvm.preserve.access.index !20
+  %1 = bitcast ptr %0 to ptr, !dbg !35
+  %2 = tail call ptr @llvm.preserve.union.access.index.p0.p0(ptr %1, i32 1), !dbg !35, !llvm.preserve.access.index !6
+  %b = getelementptr inbounds %union.v1, ptr %2, i64 0, i32 0, !dbg !35
+  %call = tail call i32 @get_value(ptr %b) #4, !dbg !36
   ret i32 %call, !dbg !37
 }
 
@@ -61,13 +61,12 @@ entry:
 ; CHECK-NEXT:        .long   45
 ; CHECK-NEXT:        .long   0
 
-declare dso_local i32 @get_value(i32*) local_unnamed_addr #1
+declare dso_local i32 @get_value(ptr) local_unnamed_addr #1
 
 ; Function Attrs: nounwind readnone
-declare %union.v3* @llvm.preserve.union.access.index.p0s_union.v3s.p0s_union.v3s(%union.v3*, i32) #2
+declare ptr @llvm.preserve.union.access.index.p0.p0(ptr, i32) #2
 
 ; Function Attrs: nounwind readnone
-declare %union.v1* @llvm.preserve.union.access.index.p0s_union.v1s.p0s_union.v1s(%union.v1*, i32) #2
 
 ; Function Attrs: nounwind readnone speculatable willreturn
 declare void @llvm.dbg.value(metadata, metadata, metadata) #3

diff --git a/llvm/test/CodeGen/BPF/CORE/offset-reloc-cast-union-2.ll b/llvm/test/CodeGen/BPF/CORE/offset-reloc-cast-union-2.ll
index d92df5bf80946..7b6369912111c 100644
--- a/llvm/test/CodeGen/BPF/CORE/offset-reloc-cast-union-2.ll
+++ b/llvm/test/CodeGen/BPF/CORE/offset-reloc-cast-union-2.ll
@@ -22,16 +22,16 @@ target triple = "bpf"
 %union.v1 = type { i32 }
 
 ; Function Attrs: nounwind
-define dso_local i32 @test(%union.v3* %arg) local_unnamed_addr #0 !dbg !19 {
+define dso_local i32 @test(ptr %arg) local_unnamed_addr #0 !dbg !19 {
 entry:
-  call void @llvm.dbg.value(metadata %union.v3* %arg, metadata !30, metadata !DIExpression()), !dbg !31
-  %0 = tail call %union.v3* @llvm.preserve.union.access.index.p0s_union.v3s.p0s_union.v3s(%union.v3* %arg, i32 1), !dbg !32, !llvm.preserve.access.index !24
-  %d = getelementptr inbounds %union.v3, %union.v3* %0, i64 0, i32 0, !dbg !32
-  %1 = tail call i32* @llvm.preserve.array.access.index.p0i32.p0a40i32([40 x i32]* elementtype([40 x i32]) %d, i32 1, i32 4), !dbg !32, !llvm.preserve.access.index !11
-  %2 = bitcast i32* %1 to %union.v1*, !dbg !32
-  %3 = tail call %union.v1* @llvm.preserve.union.access.index.p0s_union.v1s.p0s_union.v1s(%union.v1* %2, i32 1), !dbg !32, !llvm.preserve.access.index !6
-  %b = getelementptr inbounds %union.v1, %union.v1* %3, i64 0, i32 0, !dbg !32
-  %call = tail call i32 @get_value(i32* %b) #4, !dbg !33
+  call void @llvm.dbg.value(metadata ptr %arg, metadata !30, metadata !DIExpression()), !dbg !31
+  %0 = tail call ptr @llvm.preserve.union.access.index.p0.p0(ptr %arg, i32 1), !dbg !32, !llvm.preserve.access.index !24
+  %d = getelementptr inbounds %union.v3, ptr %0, i64 0, i32 0, !dbg !32
+  %1 = tail call ptr @llvm.preserve.array.access.index.p0.p0(ptr elementtype([40 x i32]) %d, i32 1, i32 4), !dbg !32, !llvm.preserve.access.index !11
+  %2 = bitcast ptr %1 to ptr, !dbg !32
+  %3 = tail call ptr @llvm.preserve.union.access.index.p0.p0(ptr %2, i32 1), !dbg !32, !llvm.preserve.access.index !6
+  %b = getelementptr inbounds %union.v1, ptr %3, i64 0, i32 0, !dbg !32
+  %call = tail call i32 @get_value(ptr %b) #4, !dbg !33
   ret i32 %call, !dbg !34
 }
 
@@ -62,16 +62,15 @@ entry:
 ; CHECK-NEXT:        .long   118
 ; CHECK-NEXT:        .long   0
 
-declare dso_local i32 @get_value(i32*) local_unnamed_addr #1
+declare dso_local i32 @get_value(ptr) local_unnamed_addr #1
 
 ; Function Attrs: nounwind readnone
-declare %union.v3* @llvm.preserve.union.access.index.p0s_union.v3s.p0s_union.v3s(%union.v3*, i32) #2
+declare ptr @llvm.preserve.union.access.index.p0.p0(ptr, i32) #2
 
 ; Function Attrs: nounwind readnone
-declare i32* @llvm.preserve.array.access.index.p0i32.p0a40i32([40 x i32]*, i32, i32) #2
+declare ptr @llvm.preserve.array.access.index.p0.p0(ptr, i32, i32) #2
 
 ; Function Attrs: nounwind readnone
-declare %union.v1* @llvm.preserve.union.access.index.p0s_union.v1s.p0s_union.v1s(%union.v1*, i32) #2
 
 ; Function Attrs: nounwind readnone speculatable willreturn
 declare void @llvm.dbg.value(metadata, metadata, metadata) #3

diff --git a/llvm/test/CodeGen/BPF/ex1.ll b/llvm/test/CodeGen/BPF/ex1.ll
index 42953e3e3137e..7f9ec841b4038 100644
--- a/llvm/test/CodeGen/BPF/ex1.ll
+++ b/llvm/test/CodeGen/BPF/ex1.ll
@@ -8,25 +8,25 @@
 @bpf_prog1.fmt = private unnamed_addr constant [15 x i8] c"skb %x dev %x\0A\00", align 1
 
 ; Function Attrs: nounwind uwtable
-define i32 @bpf_prog1(%struct.bpf_context* nocapture %ctx) #0 section "events/net/netif_receive_skb" {
+define i32 @bpf_prog1(ptr nocapture %ctx) #0 section "events/net/netif_receive_skb" {
   %devname = alloca [3 x i8], align 1
   %fmt = alloca [15 x i8], align 1
-  %1 = getelementptr inbounds [3 x i8], [3 x i8]* %devname, i64 0, i64 0
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* getelementptr inbounds ([3 x i8], [3 x i8]* @bpf_prog1.devname, i64 0, i64 0), i64 3, i1 false)
-  %2 = getelementptr inbounds %struct.bpf_context, %struct.bpf_context* %ctx, i64 0, i32 0
-  %3 = load i64, i64* %2, align 8
-  %4 = inttoptr i64 %3 to %struct.sk_buff*
-  %5 = getelementptr inbounds %struct.sk_buff, %struct.sk_buff* %4, i64 0, i32 2
-  %6 = bitcast i64* %5 to i8*
-  %7 = call i8* inttoptr (i64 4 to i8* (i8*)*)(i8* %6) #1
-  %8 = call i32 inttoptr (i64 9 to i32 (i8*, i8*, i32)*)(i8* %7, i8* %1, i32 2) #1
+  %1 = getelementptr inbounds [3 x i8], ptr %devname, i64 0, i64 0
+  call void @llvm.memcpy.p0.p0.i64(ptr %1, ptr @bpf_prog1.devname, i64 3, i1 false)
+  %2 = getelementptr inbounds %struct.bpf_context, ptr %ctx, i64 0, i32 0
+  %3 = load i64, ptr %2, align 8
+  %4 = inttoptr i64 %3 to ptr
+  %5 = getelementptr inbounds %struct.sk_buff, ptr %4, i64 0, i32 2
+  %6 = bitcast ptr %5 to ptr
+  %7 = call ptr inttoptr (i64 4 to ptr)(ptr %6) #1
+  %8 = call i32 inttoptr (i64 9 to ptr)(ptr %7, ptr %1, i32 2) #1
   %9 = icmp eq i32 %8, 0
   br i1 %9, label %10, label %13
 
 ; <label>:10                                      ; preds = %0
-  %11 = getelementptr inbounds [15 x i8], [15 x i8]* %fmt, i64 0, i64 0
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %11, i8* getelementptr inbounds ([15 x i8], [15 x i8]* @bpf_prog1.fmt, i64 0, i64 0), i64 15, i1 false)
-  %12 = call i32 (i8*, i32, ...) inttoptr (i64 11 to i32 (i8*, i32, ...)*)(i8* %11, i32 15, %struct.sk_buff* %4, i8* %7) #1
+  %11 = getelementptr inbounds [15 x i8], ptr %fmt, i64 0, i64 0
+  call void @llvm.memcpy.p0.p0.i64(ptr %11, ptr @bpf_prog1.fmt, i64 15, i1 false)
+  %12 = call i32 (ptr, i32, ...) inttoptr (i64 11 to ptr)(ptr %11, i32 15, ptr %4, ptr %7) #1
 ; CHECK-LABEL: bpf_prog1:
 ; CHECK: call 4
 ; CHECK: call 9
@@ -45,4 +45,4 @@ define i32 @bpf_prog1(%struct.bpf_context* nocapture %ctx) #0 section "events/ne
 }
 
 ; Function Attrs: nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) #1
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) #1

diff --git a/llvm/test/CodeGen/BPF/reloc.ll b/llvm/test/CodeGen/BPF/reloc.ll
index be48550691771..f1b957d3eb868 100644
--- a/llvm/test/CodeGen/BPF/reloc.ll
+++ b/llvm/test/CodeGen/BPF/reloc.ll
@@ -8,25 +8,25 @@
 @bpf_prog1.fmt = private unnamed_addr constant [15 x i8] c"skb %x dev %x\0A\00", align 1
 
 ; Function Attrs: norecurse
-define i32 @bpf_prog1(%struct.bpf_context* nocapture %ctx) #0 section "events/net/netif_receive_skb" {
+define i32 @bpf_prog1(ptr nocapture %ctx) #0 section "events/net/netif_receive_skb" {
   %devname = alloca [3 x i8], align 1
   %fmt = alloca [15 x i8], align 1
-  %1 = getelementptr inbounds [3 x i8], [3 x i8]* %devname, i64 0, i64 0
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* getelementptr inbounds ([3 x i8], [3 x i8]* @bpf_prog1.devname, i64 0, i64 0), i64 3, i1 false)
-  %2 = getelementptr inbounds %struct.bpf_context, %struct.bpf_context* %ctx, i64 0, i32 0
-  %3 = load i64, i64* %2, align 8
-  %4 = inttoptr i64 %3 to %struct.sk_buff*
-  %5 = getelementptr inbounds %struct.sk_buff, %struct.sk_buff* %4, i64 0, i32 2
-  %6 = bitcast i64* %5 to i8*
-  %7 = call i8* inttoptr (i64 4 to i8* (i8*)*)(i8* %6) #1
-  %8 = call i32 inttoptr (i64 9 to i32 (i8*, i8*, i32)*)(i8* %7, i8* %1, i32 2) #1
+  %1 = getelementptr inbounds [3 x i8], ptr %devname, i64 0, i64 0
+  call void @llvm.memcpy.p0.p0.i64(ptr %1, ptr @bpf_prog1.devname, i64 3, i1 false)
+  %2 = getelementptr inbounds %struct.bpf_context, ptr %ctx, i64 0, i32 0
+  %3 = load i64, ptr %2, align 8
+  %4 = inttoptr i64 %3 to ptr
+  %5 = getelementptr inbounds %struct.sk_buff, ptr %4, i64 0, i32 2
+  %6 = bitcast ptr %5 to ptr
+  %7 = call ptr inttoptr (i64 4 to ptr)(ptr %6) #1
+  %8 = call i32 inttoptr (i64 9 to ptr)(ptr %7, ptr %1, i32 2) #1
   %9 = icmp eq i32 %8, 0
   br i1 %9, label %10, label %13
 
 ; <label>:10                                      ; preds = %0
-  %11 = getelementptr inbounds [15 x i8], [15 x i8]* %fmt, i64 0, i64 0
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %11, i8* getelementptr inbounds ([15 x i8], [15 x i8]* @bpf_prog1.fmt, i64 0, i64 0), i64 15, i1 false)
-  %12 = call i32 (i8*, i32, ...) inttoptr (i64 11 to i32 (i8*, i32, ...)*)(i8* %11, i32 15, %struct.sk_buff* %4, i8* %7) #1
+  %11 = getelementptr inbounds [15 x i8], ptr %fmt, i64 0, i64 0
+  call void @llvm.memcpy.p0.p0.i64(ptr %11, ptr @bpf_prog1.fmt, i64 15, i1 false)
+  %12 = call i32 (ptr, i32, ...) inttoptr (i64 11 to ptr)(ptr %11, i32 15, ptr %4, ptr %7) #1
   br label %13
 
 ; <label>:13                                      ; preds = %10, %0
@@ -38,6 +38,6 @@ define i32 @bpf_prog1(%struct.bpf_context* nocapture %ctx) #0 section "events/ne
 }
 
 ; Function Attrs: nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) #1
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) #1
 
 attributes #0 = { norecurse }

diff --git a/llvm/test/CodeGen/BPF/remove_truncate_3.ll b/llvm/test/CodeGen/BPF/remove_truncate_3.ll
index c9649a644fb85..82057dc8fbee9 100644
--- a/llvm/test/CodeGen/BPF/remove_truncate_3.ll
+++ b/llvm/test/CodeGen/BPF/remove_truncate_3.ll
@@ -38,46 +38,46 @@
 @gbl = common local_unnamed_addr global i32 0, align 4
 
 ; Function Attrs: norecurse nounwind
-define i32 @xdp_dummy(%struct.xdp_md* nocapture readonly) local_unnamed_addr #0 {
-  %2 = load i32, i32* @gbl, align 4
+define i32 @xdp_dummy(ptr nocapture readonly) local_unnamed_addr #0 {
+  %2 = load i32, ptr @gbl, align 4
   %3 = icmp eq i32 %2, 0
   br i1 %3, label %11, label %4
 
 ; <label>:4:                                      ; preds = %1
-  %5 = getelementptr inbounds %struct.xdp_md, %struct.xdp_md* %0, i64 0, i32 0
-  %6 = load i32, i32* %5, align 4
+  %5 = getelementptr inbounds %struct.xdp_md, ptr %0, i64 0, i32 0
+  %6 = load i32, ptr %5, align 4
   %7 = zext i32 %6 to i64
-  %8 = inttoptr i64 %7 to i8*
-  %9 = load i8, i8* %8, align 1
+  %8 = inttoptr i64 %7 to ptr
+  %9 = load i8, ptr %8, align 1
   %10 = icmp eq i8 %9, 1
   br i1 %10, label %28, label %23
 ; CHECK:  r1 = *(u32 *)(r1 + 0)
 ; CHECK:  r2 = *(u8 *)(r1 + 0)
 
 ; <label>:11:                                     ; preds = %1
-  %12 = getelementptr inbounds %struct.xdp_md, %struct.xdp_md* %0, i64 0, i32 1
-  %13 = load i32, i32* %12, align 4
+  %12 = getelementptr inbounds %struct.xdp_md, ptr %0, i64 0, i32 1
+  %13 = load i32, ptr %12, align 4
   %14 = zext i32 %13 to i64
 ; CHECK:  r2 = *(u32 *)(r1 + 4)
-  %15 = inttoptr i64 %14 to i8*
-  %16 = load volatile i8, i8* %15, align 1
+  %15 = inttoptr i64 %14 to ptr
+  %16 = load volatile i8, ptr %15, align 1
 ; CHECK:  r2 = *(u8 *)(r2 + 0)
   %17 = icmp eq i8 %16, 1
   br i1 %17, label %28, label %18
 
 ; <label>:18:                                     ; preds = %11
-  %19 = getelementptr inbounds %struct.xdp_md, %struct.xdp_md* %0, i64 0, i32 0
-  %20 = load i32, i32* %19, align 4
+  %19 = getelementptr inbounds %struct.xdp_md, ptr %0, i64 0, i32 0
+  %20 = load i32, ptr %19, align 4
   %21 = zext i32 %20 to i64
-  %22 = inttoptr i64 %21 to i8*
+  %22 = inttoptr i64 %21 to ptr
   br label %23
 ; CHECK: r1 = *(u32 *)(r1 + 0)
 
 ; <label>:23:                                     ; preds = %18, %4
-  %24 = phi i8* [ %22, %18 ], [ %8, %4 ]
+  %24 = phi ptr [ %22, %18 ], [ %8, %4 ]
 ; CHECK-NOT: r1 <<= 32
 ; CHECK-NOT: r1 >>= 32
-  %25 = load volatile i8, i8* %24, align 1
+  %25 = load volatile i8, ptr %24, align 1
 ; CHECK:  r1 = *(u8 *)(r1 + 0)
   %26 = icmp eq i8 %25, 0
   %27 = zext i1 %26 to i32

diff  --git a/llvm/test/CodeGen/BPF/sockex2.ll b/llvm/test/CodeGen/BPF/sockex2.ll
index 9b873b221f42e..4131d9dac31d8 100644
--- a/llvm/test/CodeGen/BPF/sockex2.ll
+++ b/llvm/test/CodeGen/BPF/sockex2.ll
@@ -6,16 +6,16 @@
 @hash_map = global %struct.bpf_map_def { i32 1, i32 4, i32 8, i32 1024 }, section "maps", align 4
 
 ; Function Attrs: nounwind uwtable
-define i32 @bpf_prog2(%struct.sk_buff* %skb) #0 section "socket2" {
+define i32 @bpf_prog2(ptr %skb) #0 section "socket2" {
   %key = alloca i32, align 4
   %val = alloca i64, align 8
-  %1 = bitcast %struct.sk_buff* %skb to i8*
-  %2 = call i64 @llvm.bpf.load.half(i8* %1, i64 12) #2
+  %1 = bitcast ptr %skb to ptr
+  %2 = call i64 @llvm.bpf.load.half(ptr %1, i64 12) #2
   %3 = icmp eq i64 %2, 34984
   br i1 %3, label %4, label %6
 
 ; <label>:4                                       ; preds = %0
-  %5 = call i64 @llvm.bpf.load.half(i8* %1, i64 16) #2
+  %5 = call i64 @llvm.bpf.load.half(ptr %1, i64 16) #2
   br label %6
 
 ; <label>:6                                       ; preds = %4, %0
@@ -26,7 +26,7 @@ define i32 @bpf_prog2(%struct.sk_buff* %skb) #0 section "socket2" {
 
 ; <label>:8                                       ; preds = %6
   %9 = add i64 %nhoff.0.i, 2
-  %10 = call i64 @llvm.bpf.load.half(i8* %1, i64 %9) #2
+  %10 = call i64 @llvm.bpf.load.half(ptr %1, i64 %9) #2
   %11 = add i64 %nhoff.0.i, 4
   br label %12
 
@@ -40,30 +40,30 @@ define i32 @bpf_prog2(%struct.sk_buff* %skb) #0 section "socket2" {
 
 ; <label>:13                                      ; preds = %12
   %14 = add i64 %nhoff.1.i, 6
-  %15 = call i64 @llvm.bpf.load.half(i8* %1, i64 %14) #2
+  %15 = call i64 @llvm.bpf.load.half(ptr %1, i64 %14) #2
   %16 = and i64 %15, 16383
   %17 = icmp eq i64 %16, 0
   br i1 %17, label %18, label %.thread.i.i
 
 ; <label>:18                                      ; preds = %13
   %19 = add i64 %nhoff.1.i, 9
-  %20 = call i64 @llvm.bpf.load.byte(i8* %1, i64 %19) #2
+  %20 = call i64 @llvm.bpf.load.byte(ptr %1, i64 %19) #2
   %21 = icmp eq i64 %20, 47
   br i1 %21, label %28, label %.thread.i.i
 
 .thread.i.i:                                      ; preds = %18, %13
   %22 = phi i64 [ %20, %18 ], [ 0, %13 ]
   %23 = add i64 %nhoff.1.i, 12
-  %24 = call i64 @llvm.bpf.load.word(i8* %1, i64 %23) #2
+  %24 = call i64 @llvm.bpf.load.word(ptr %1, i64 %23) #2
   %25 = add i64 %nhoff.1.i, 16
-  %26 = call i64 @llvm.bpf.load.word(i8* %1, i64 %25) #2
+  %26 = call i64 @llvm.bpf.load.word(ptr %1, i64 %25) #2
   %27 = trunc i64 %26 to i32
   br label %28
 
 ; <label>:28                                      ; preds = %.thread.i.i, %18
   %29 = phi i32 [ %27, %.thread.i.i ], [ undef, %18 ]
   %30 = phi i64 [ %22, %.thread.i.i ], [ 47, %18 ]
-  %31 = call i64 @llvm.bpf.load.byte(i8* %1, i64 %nhoff.1.i) #2
+  %31 = call i64 @llvm.bpf.load.byte(ptr %1, i64 %nhoff.1.i) #2
   %32 = icmp eq i64 %31, 69
   br i1 %32, label %33, label %35
 
@@ -79,23 +79,23 @@ define i32 @bpf_prog2(%struct.sk_buff* %skb) #0 section "socket2" {
 
 ; <label>:39                                      ; preds = %12
   %40 = add i64 %nhoff.1.i, 6
-  %41 = call i64 @llvm.bpf.load.byte(i8* %1, i64 %40) #2
+  %41 = call i64 @llvm.bpf.load.byte(ptr %1, i64 %40) #2
   %42 = add i64 %nhoff.1.i, 8
-  %43 = call i64 @llvm.bpf.load.word(i8* %1, i64 %42) #2
+  %43 = call i64 @llvm.bpf.load.word(ptr %1, i64 %42) #2
   %44 = add i64 %nhoff.1.i, 12
-  %45 = call i64 @llvm.bpf.load.word(i8* %1, i64 %44) #2
+  %45 = call i64 @llvm.bpf.load.word(ptr %1, i64 %44) #2
   %46 = add i64 %nhoff.1.i, 16
-  %47 = call i64 @llvm.bpf.load.word(i8* %1, i64 %46) #2
+  %47 = call i64 @llvm.bpf.load.word(ptr %1, i64 %46) #2
   %48 = add i64 %nhoff.1.i, 20
-  %49 = call i64 @llvm.bpf.load.word(i8* %1, i64 %48) #2
+  %49 = call i64 @llvm.bpf.load.word(ptr %1, i64 %48) #2
   %50 = add i64 %nhoff.1.i, 24
-  %51 = call i64 @llvm.bpf.load.word(i8* %1, i64 %50) #2
+  %51 = call i64 @llvm.bpf.load.word(ptr %1, i64 %50) #2
   %52 = add i64 %nhoff.1.i, 28
-  %53 = call i64 @llvm.bpf.load.word(i8* %1, i64 %52) #2
+  %53 = call i64 @llvm.bpf.load.word(ptr %1, i64 %52) #2
   %54 = add i64 %nhoff.1.i, 32
-  %55 = call i64 @llvm.bpf.load.word(i8* %1, i64 %54) #2
+  %55 = call i64 @llvm.bpf.load.word(ptr %1, i64 %54) #2
   %56 = add i64 %nhoff.1.i, 36
-  %57 = call i64 @llvm.bpf.load.word(i8* %1, i64 %56) #2
+  %57 = call i64 @llvm.bpf.load.word(ptr %1, i64 %56) #2
   %58 = xor i64 %53, %51
   %59 = xor i64 %58, %55
   %60 = xor i64 %59, %57
@@ -114,9 +114,9 @@ parse_ip.exit.i:                                  ; preds = %39, %35, %33
   ]
 
 ; <label>:65                                      ; preds = %parse_ip.exit.i
-  %66 = call i64 @llvm.bpf.load.half(i8* %1, i64 %nhoff.2.i) #2
+  %66 = call i64 @llvm.bpf.load.half(ptr %1, i64 %nhoff.2.i) #2
   %67 = add i64 %nhoff.2.i, 2
-  %68 = call i64 @llvm.bpf.load.half(i8* %1, i64 %67) #2
+  %68 = call i64 @llvm.bpf.load.half(ptr %1, i64 %67) #2
   %69 = and i64 %66, 1856
   %70 = icmp eq i64 %69, 0
   br i1 %70, label %71, label %187
@@ -139,7 +139,7 @@ parse_ip.exit.i:                                  ; preds = %39, %35, %33
 
 ; <label>:82                                      ; preds = %71
   %83 = add i64 %nhoff.4..i, 2
-  %84 = call i64 @llvm.bpf.load.half(i8* %1, i64 %83) #2
+  %84 = call i64 @llvm.bpf.load.half(ptr %1, i64 %83) #2
   %85 = add i64 %nhoff.4..i, 4
   br label %86
 
@@ -153,30 +153,30 @@ parse_ip.exit.i:                                  ; preds = %39, %35, %33
 
 ; <label>:87                                      ; preds = %86
   %88 = add i64 %nhoff.6.i, 6
-  %89 = call i64 @llvm.bpf.load.half(i8* %1, i64 %88) #2
+  %89 = call i64 @llvm.bpf.load.half(ptr %1, i64 %88) #2
   %90 = and i64 %89, 16383
   %91 = icmp eq i64 %90, 0
   br i1 %91, label %92, label %.thread.i4.i
 
 ; <label>:92                                      ; preds = %87
   %93 = add i64 %nhoff.6.i, 9
-  %94 = call i64 @llvm.bpf.load.byte(i8* %1, i64 %93) #2
+  %94 = call i64 @llvm.bpf.load.byte(ptr %1, i64 %93) #2
   %95 = icmp eq i64 %94, 47
   br i1 %95, label %102, label %.thread.i4.i
 
 .thread.i4.i:                                     ; preds = %92, %87
   %96 = phi i64 [ %94, %92 ], [ 0, %87 ]
   %97 = add i64 %nhoff.6.i, 12
-  %98 = call i64 @llvm.bpf.load.word(i8* %1, i64 %97) #2
+  %98 = call i64 @llvm.bpf.load.word(ptr %1, i64 %97) #2
   %99 = add i64 %nhoff.6.i, 16
-  %100 = call i64 @llvm.bpf.load.word(i8* %1, i64 %99) #2
+  %100 = call i64 @llvm.bpf.load.word(ptr %1, i64 %99) #2
   %101 = trunc i64 %100 to i32
   br label %102
 
 ; <label>:102                                     ; preds = %.thread.i4.i, %92
   %103 = phi i32 [ %101, %.thread.i4.i ], [ %63, %92 ]
   %104 = phi i64 [ %96, %.thread.i4.i ], [ 47, %92 ]
-  %105 = call i64 @llvm.bpf.load.byte(i8* %1, i64 %nhoff.6.i) #2
+  %105 = call i64 @llvm.bpf.load.byte(ptr %1, i64 %nhoff.6.i) #2
   %106 = icmp eq i64 %105, 69
   br i1 %106, label %107, label %109
 
@@ -192,23 +192,23 @@ parse_ip.exit.i:                                  ; preds = %39, %35, %33
 
 ; <label>:113                                     ; preds = %86
   %114 = add i64 %nhoff.6.i, 6
-  %115 = call i64 @llvm.bpf.load.byte(i8* %1, i64 %114) #2
+  %115 = call i64 @llvm.bpf.load.byte(ptr %1, i64 %114) #2
   %116 = add i64 %nhoff.6.i, 8
-  %117 = call i64 @llvm.bpf.load.word(i8* %1, i64 %116) #2
+  %117 = call i64 @llvm.bpf.load.word(ptr %1, i64 %116) #2
   %118 = add i64 %nhoff.6.i, 12
-  %119 = call i64 @llvm.bpf.load.word(i8* %1, i64 %118) #2
+  %119 = call i64 @llvm.bpf.load.word(ptr %1, i64 %118) #2
   %120 = add i64 %nhoff.6.i, 16
-  %121 = call i64 @llvm.bpf.load.word(i8* %1, i64 %120) #2
+  %121 = call i64 @llvm.bpf.load.word(ptr %1, i64 %120) #2
   %122 = add i64 %nhoff.6.i, 20
-  %123 = call i64 @llvm.bpf.load.word(i8* %1, i64 %122) #2
+  %123 = call i64 @llvm.bpf.load.word(ptr %1, i64 %122) #2
   %124 = add i64 %nhoff.6.i, 24
-  %125 = call i64 @llvm.bpf.load.word(i8* %1, i64 %124) #2
+  %125 = call i64 @llvm.bpf.load.word(ptr %1, i64 %124) #2
   %126 = add i64 %nhoff.6.i, 28
-  %127 = call i64 @llvm.bpf.load.word(i8* %1, i64 %126) #2
+  %127 = call i64 @llvm.bpf.load.word(ptr %1, i64 %126) #2
   %128 = add i64 %nhoff.6.i, 32
-  %129 = call i64 @llvm.bpf.load.word(i8* %1, i64 %128) #2
+  %129 = call i64 @llvm.bpf.load.word(ptr %1, i64 %128) #2
   %130 = add i64 %nhoff.6.i, 36
-  %131 = call i64 @llvm.bpf.load.word(i8* %1, i64 %130) #2
+  %131 = call i64 @llvm.bpf.load.word(ptr %1, i64 %130) #2
   %132 = xor i64 %127, %125
   %133 = xor i64 %132, %129
   %134 = xor i64 %133, %131
@@ -218,30 +218,30 @@ parse_ip.exit.i:                                  ; preds = %39, %35, %33
 
 ; <label>:137                                     ; preds = %parse_ip.exit.i
   %138 = add i64 %nhoff.2.i, 6
-  %139 = call i64 @llvm.bpf.load.half(i8* %1, i64 %138) #2
+  %139 = call i64 @llvm.bpf.load.half(ptr %1, i64 %138) #2
   %140 = and i64 %139, 16383
   %141 = icmp eq i64 %140, 0
   br i1 %141, label %142, label %.thread.i1.i
 
 ; <label>:142                                     ; preds = %137
   %143 = add i64 %nhoff.2.i, 9
-  %144 = call i64 @llvm.bpf.load.byte(i8* %1, i64 %143) #2
+  %144 = call i64 @llvm.bpf.load.byte(ptr %1, i64 %143) #2
   %145 = icmp eq i64 %144, 47
   br i1 %145, label %152, label %.thread.i1.i
 
 .thread.i1.i:                                     ; preds = %142, %137
   %146 = phi i64 [ %144, %142 ], [ 0, %137 ]
   %147 = add i64 %nhoff.2.i, 12
-  %148 = call i64 @llvm.bpf.load.word(i8* %1, i64 %147) #2
+  %148 = call i64 @llvm.bpf.load.word(ptr %1, i64 %147) #2
   %149 = add i64 %nhoff.2.i, 16
-  %150 = call i64 @llvm.bpf.load.word(i8* %1, i64 %149) #2
+  %150 = call i64 @llvm.bpf.load.word(ptr %1, i64 %149) #2
   %151 = trunc i64 %150 to i32
   br label %152
 
 ; <label>:152                                     ; preds = %.thread.i1.i, %142
   %153 = phi i32 [ %151, %.thread.i1.i ], [ %63, %142 ]
   %154 = phi i64 [ %146, %.thread.i1.i ], [ 47, %142 ]
-  %155 = call i64 @llvm.bpf.load.byte(i8* %1, i64 %nhoff.2.i) #2
+  %155 = call i64 @llvm.bpf.load.byte(ptr %1, i64 %nhoff.2.i) #2
   %156 = icmp eq i64 %155, 69
   br i1 %156, label %157, label %159
 
@@ -257,23 +257,23 @@ parse_ip.exit.i:                                  ; preds = %39, %35, %33
 
 ; <label>:163                                     ; preds = %parse_ip.exit.i
   %164 = add i64 %nhoff.2.i, 6
-  %165 = call i64 @llvm.bpf.load.byte(i8* %1, i64 %164) #2
+  %165 = call i64 @llvm.bpf.load.byte(ptr %1, i64 %164) #2
   %166 = add i64 %nhoff.2.i, 8
-  %167 = call i64 @llvm.bpf.load.word(i8* %1, i64 %166) #2
+  %167 = call i64 @llvm.bpf.load.word(ptr %1, i64 %166) #2
   %168 = add i64 %nhoff.2.i, 12
-  %169 = call i64 @llvm.bpf.load.word(i8* %1, i64 %168) #2
+  %169 = call i64 @llvm.bpf.load.word(ptr %1, i64 %168) #2
   %170 = add i64 %nhoff.2.i, 16
-  %171 = call i64 @llvm.bpf.load.word(i8* %1, i64 %170) #2
+  %171 = call i64 @llvm.bpf.load.word(ptr %1, i64 %170) #2
   %172 = add i64 %nhoff.2.i, 20
-  %173 = call i64 @llvm.bpf.load.word(i8* %1, i64 %172) #2
+  %173 = call i64 @llvm.bpf.load.word(ptr %1, i64 %172) #2
   %174 = add i64 %nhoff.2.i, 24
-  %175 = call i64 @llvm.bpf.load.word(i8* %1, i64 %174) #2
+  %175 = call i64 @llvm.bpf.load.word(ptr %1, i64 %174) #2
   %176 = add i64 %nhoff.2.i, 28
-  %177 = call i64 @llvm.bpf.load.word(i8* %1, i64 %176) #2
+  %177 = call i64 @llvm.bpf.load.word(ptr %1, i64 %176) #2
   %178 = add i64 %nhoff.2.i, 32
-  %179 = call i64 @llvm.bpf.load.word(i8* %1, i64 %178) #2
+  %179 = call i64 @llvm.bpf.load.word(ptr %1, i64 %178) #2
   %180 = add i64 %nhoff.2.i, 36
-  %181 = call i64 @llvm.bpf.load.word(i8* %1, i64 %180) #2
+  %181 = call i64 @llvm.bpf.load.word(ptr %1, i64 %180) #2
   %182 = xor i64 %177, %175
   %183 = xor i64 %182, %179
   %184 = xor i64 %183, %181
@@ -288,22 +288,22 @@ parse_ip.exit.i:                                  ; preds = %39, %35, %33
   %cond.i.i = icmp eq i64 %189, 51
   %190 = select i1 %cond.i.i, i64 4, i64 0
   %191 = add i64 %190, %nhoff.7.i
-  %192 = call i64 @llvm.bpf.load.word(i8* %1, i64 %191) #2
-  store i32 %188, i32* %key, align 4
-  %193 = bitcast i32* %key to i8*
-  %194 = call i8* inttoptr (i64 1 to i8* (i8*, i8*)*)(i8* bitcast (%struct.bpf_map_def* @hash_map to i8*), i8* %193) #2
-  %195 = icmp eq i8* %194, null
+  %192 = call i64 @llvm.bpf.load.word(ptr %1, i64 %191) #2
+  store i32 %188, ptr %key, align 4
+  %193 = bitcast ptr %key to ptr
+  %194 = call ptr inttoptr (i64 1 to ptr)(ptr @hash_map, ptr %193) #2
+  %195 = icmp eq ptr %194, null
   br i1 %195, label %199, label %196
 
 ; <label>:196                                     ; preds = %187
-  %197 = bitcast i8* %194 to i64*
-  %198 = atomicrmw add i64* %197, i64 1 seq_cst
+  %197 = bitcast ptr %194 to ptr
+  %198 = atomicrmw add ptr %197, i64 1 seq_cst
   br label %flow_dissector.exit.thread
 
 ; <label>:199                                     ; preds = %187
-  store i64 1, i64* %val, align 8
-  %200 = bitcast i64* %val to i8*
-  %201 = call i32 inttoptr (i64 2 to i32 (i8*, i8*, i8*, i64)*)(i8* bitcast (%struct.bpf_map_def* @hash_map to i8*), i8* %193, i8* %200, i64 0) #2
+  store i64 1, ptr %val, align 8
+  %200 = bitcast ptr %val to ptr
+  %201 = call i32 inttoptr (i64 2 to ptr)(ptr @hash_map, ptr %193, ptr %200, i64 0) #2
   br label %flow_dissector.exit.thread
 
 flow_dissector.exit.thread:                       ; preds = %86, %12, %196, %199
@@ -317,8 +317,8 @@ flow_dissector.exit.thread:                       ; preds = %86, %12, %196, %199
 ; CHECK: call 2 # encoding: [0x85,0x00,0x00,0x00,0x02,0x00,0x00,0x00]
 }
 
-declare i64 @llvm.bpf.load.half(i8*, i64) #1
+declare i64 @llvm.bpf.load.half(ptr, i64) #1
 
-declare i64 @llvm.bpf.load.word(i8*, i64) #1
+declare i64 @llvm.bpf.load.word(ptr, i64) #1
 
-declare i64 @llvm.bpf.load.byte(i8*, i64) #1
+declare i64 @llvm.bpf.load.byte(ptr, i64) #1

diff  --git a/llvm/test/CodeGen/BPF/xadd.ll b/llvm/test/CodeGen/BPF/xadd.ll
index 4f16c061d9d81..4901d9380ac4d 100644
--- a/llvm/test/CodeGen/BPF/xadd.ll
+++ b/llvm/test/CodeGen/BPF/xadd.ll
@@ -17,11 +17,11 @@ target datalayout = "e-m:e-p:64:64-i64:64-n32:64-S128"
 target triple = "bpf"
 
 ; Function Attrs: nounwind
-define dso_local i32 @test(i32* nocapture %ptr) local_unnamed_addr #0 !dbg !7 {
+define dso_local i32 @test(ptr nocapture %ptr) local_unnamed_addr #0 !dbg !7 {
 entry:
-  call void @llvm.dbg.value(metadata i32* %ptr, metadata !13, metadata !DIExpression()), !dbg !15
-  %0 = atomicrmw add i32* %ptr, i32 4 seq_cst, !dbg !16
-  %1 = atomicrmw add i32* %ptr, i32 6 seq_cst, !dbg !17
+  call void @llvm.dbg.value(metadata ptr %ptr, metadata !13, metadata !DIExpression()), !dbg !15
+  %0 = atomicrmw add ptr %ptr, i32 4 seq_cst, !dbg !16
+  %1 = atomicrmw add ptr %ptr, i32 6 seq_cst, !dbg !17
 ; CHECK: line 4: Invalid usage of the XADD return value
   call void @llvm.dbg.value(metadata i32 %1, metadata !14, metadata !DIExpression()), !dbg !18
   ret i32 %1, !dbg !19

diff  --git a/llvm/test/CodeGen/BPF/xadd_legal.ll b/llvm/test/CodeGen/BPF/xadd_legal.ll
index 0d30084af8bf4..f4d4f737cc619 100644
--- a/llvm/test/CodeGen/BPF/xadd_legal.ll
+++ b/llvm/test/CodeGen/BPF/xadd_legal.ll
@@ -15,12 +15,12 @@
 ; could effectively create sub-register reference coming from indexing a full
 ; register which could then exercise hasLivingDefs inside BPFMIChecker.cpp.
 
-define dso_local i32 @test(i32* nocapture %ptr, i64 %a) {
+define dso_local i32 @test(ptr nocapture %ptr, i64 %a) {
 entry:
   %conv = trunc i64 %a to i32
-  %0 = atomicrmw add i32* %ptr, i32 %conv seq_cst
+  %0 = atomicrmw add ptr %ptr, i32 %conv seq_cst
 ; CHECK-64: lock *(u32 *)(r1 + 0) += r2
 ; CHECK-32: lock *(u32 *)(r1 + 0) += w2
-  %1 = load i32, i32* %ptr, align 4
+  %1 = load i32, ptr %ptr, align 4
   ret i32 %1
 }

diff  --git a/llvm/test/CodeGen/Generic/DbgValueAggregate.ll b/llvm/test/CodeGen/Generic/DbgValueAggregate.ll
index 29d408d2ebe93..4851a2585c107 100644
--- a/llvm/test/CodeGen/Generic/DbgValueAggregate.ll
+++ b/llvm/test/CodeGen/Generic/DbgValueAggregate.ll
@@ -4,7 +4,7 @@ target triple = "aarch64-unknown-linux-gnu"
 
 define void @MAIN_() #0 {
 L.entry:
-  %0 = load <{ float, float }>, <{ float, float }>* undef, align 1
+  %0 = load <{ float, float }>, ptr undef, align 1
   ; CHECK: DEBUG_VALUE: localvar
   ; CHECK: DEBUG_VALUE: localvar
   call void @llvm.dbg.value(metadata <{ float, float }> %0, metadata !10, metadata !DIExpression()), !dbg !13

diff  --git a/llvm/test/CodeGen/Generic/MIRDebugify/check-line-and-variables-x.mir b/llvm/test/CodeGen/Generic/MIRDebugify/check-line-and-variables-x.mir
index 8614fc04ddf65..eaa627966347f 100644
--- a/llvm/test/CodeGen/Generic/MIRDebugify/check-line-and-variables-x.mir
+++ b/llvm/test/CodeGen/Generic/MIRDebugify/check-line-and-variables-x.mir
@@ -13,7 +13,7 @@
     call void @llvm.dbg.value(metadata i32 %add, metadata !12, metadata !DIExpression()), !dbg !15
     %mul = shl nsw i32 %add, 1, !dbg !16
     call void @llvm.dbg.value(metadata i32 %mul, metadata !14, metadata !DIExpression()), !dbg !16
-    store i32 %mul, i32* @ga, align 4, !dbg !17
+    store i32 %mul, ptr @ga, align 4, !dbg !17
     ret i32 %add, !dbg !18
   }
 

diff  --git a/llvm/test/CodeGen/Generic/MIRDebugify/check-line-and-variables.mir b/llvm/test/CodeGen/Generic/MIRDebugify/check-line-and-variables.mir
index d1fb9cd409fea..9eb722258b703 100644
--- a/llvm/test/CodeGen/Generic/MIRDebugify/check-line-and-variables.mir
+++ b/llvm/test/CodeGen/Generic/MIRDebugify/check-line-and-variables.mir
@@ -13,18 +13,18 @@
     %a.addr = alloca i32, align 4
     %b.addr = alloca i32, align 4
     %c = alloca i32, align 4
-    store i32 %a, i32* %a.addr, align 4
-    store i32 %b, i32* %b.addr, align 4
-    %0 = load i32, i32* %a.addr, align 4
-    %1 = load i32, i32* %b.addr, align 4
+    store i32 %a, ptr %a.addr, align 4
+    store i32 %b, ptr %b.addr, align 4
+    %0 = load i32, ptr %a.addr, align 4
+    %1 = load i32, ptr %b.addr, align 4
     %add = add nsw i32 %0, %1
-    store i32 %add, i32* %c, align 4
-    %2 = load i32, i32* %c, align 4
+    store i32 %add, ptr %c, align 4
+    %2 = load i32, ptr %c, align 4
     %mul = mul nsw i32 %2, 2
-    store i32 %mul, i32* @ga, align 4
-    %3 = load i32, i32* %c, align 4
+    store i32 %mul, ptr @ga, align 4
+    %3 = load i32, ptr %c, align 4
     ; dead-mi-elimination will remove %4 = ...
-    %4 = load i32, i32* %a.addr, align 4
+    %4 = load i32, ptr %a.addr, align 4
     ret i32 %3
   }
 

diff  --git a/llvm/test/CodeGen/Generic/MIRStripDebug/all.mir b/llvm/test/CodeGen/Generic/MIRStripDebug/all.mir
index 7af3dde811f1c..0404b991779a5 100644
--- a/llvm/test/CodeGen/Generic/MIRStripDebug/all.mir
+++ b/llvm/test/CodeGen/Generic/MIRStripDebug/all.mir
@@ -21,7 +21,7 @@
   declare void @llvm.dbg.value(metadata, metadata, metadata) #0
   
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #1
+  declare void @llvm.stackprotector(ptr, ptr) #1
   
   attributes #0 = { nounwind readnone speculatable willreturn }
   attributes #1 = { nounwind }

diff  --git a/llvm/test/CodeGen/Generic/MIRStripDebug/dont-strip-real-debug-info.mir b/llvm/test/CodeGen/Generic/MIRStripDebug/dont-strip-real-debug-info.mir
index 329f3b0a55fa6..01a3b2faeb7de 100644
--- a/llvm/test/CodeGen/Generic/MIRStripDebug/dont-strip-real-debug-info.mir
+++ b/llvm/test/CodeGen/Generic/MIRStripDebug/dont-strip-real-debug-info.mir
@@ -23,7 +23,7 @@
   declare void @llvm.dbg.value(metadata, metadata, metadata) #0
   
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #1
+  declare void @llvm.stackprotector(ptr, ptr) #1
   
   attributes #0 = { nounwind readnone speculatable willreturn }
   attributes #1 = { nounwind }

diff  --git a/llvm/test/CodeGen/Generic/MIRStripDebug/multiple-moduleflags.mir b/llvm/test/CodeGen/Generic/MIRStripDebug/multiple-moduleflags.mir
index 792e1d47a4299..9240c1a56eb9b 100644
--- a/llvm/test/CodeGen/Generic/MIRStripDebug/multiple-moduleflags.mir
+++ b/llvm/test/CodeGen/Generic/MIRStripDebug/multiple-moduleflags.mir
@@ -21,7 +21,7 @@
   declare void @llvm.dbg.value(metadata, metadata, metadata) #0
   
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #1
+  declare void @llvm.stackprotector(ptr, ptr) #1
   
   attributes #0 = { nounwind readnone speculatable willreturn }
   attributes #1 = { nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/autohvx/fsplat.ll b/llvm/test/CodeGen/Hexagon/autohvx/fsplat.ll
index f64674bd0e847..219eb4d03dd96 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/fsplat.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/fsplat.ll
@@ -7,7 +7,7 @@
 target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
 target triple = "hexagon"
 ; Function Attrs: nofree norecurse nounwind writeonly
-define dso_local i32 @foo(float* nocapture %0, i32 %1) local_unnamed_addr #0 {
+define dso_local i32 @foo(ptr nocapture %0, i32 %1) local_unnamed_addr #0 {
   %3 = icmp sgt i32 %1, 0
   br i1 %3, label %4, label %22
 
@@ -16,23 +16,23 @@ define dso_local i32 @foo(float* nocapture %0, i32 %1) local_unnamed_addr #0 {
   br i1 %5, label %6, label %9
 
 6:                                                ; preds = %20, %4
-  %7 = phi float* [ %0, %4 ], [ %11, %20 ]
+  %7 = phi ptr [ %0, %4 ], [ %11, %20 ]
   %8 = phi i32 [ 0, %4 ], [ %10, %20 ]
   br label %23
 
 9:                                                ; preds = %4
   %10 = and i32 %1, -64
-  %11 = getelementptr float, float* %0, i32 %10
+  %11 = getelementptr float, ptr %0, i32 %10
   br label %12
 
 12:                                               ; preds = %12, %9
   %13 = phi i32 [ 0, %9 ], [ %18, %12 ]
-  %14 = getelementptr float, float* %0, i32 %13
-  %15 = bitcast float* %14 to <32 x float>*
-  store <32 x float> <float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01>, <32 x float>* %15, align 4
-  %16 = getelementptr float, float* %14, i32 32
-  %17 = bitcast float* %16 to <32 x float>*
-  store <32 x float> <float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01>, <32 x float>* %17, align 4
+  %14 = getelementptr float, ptr %0, i32 %13
+  %15 = bitcast ptr %14 to ptr
+  store <32 x float> <float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01>, ptr %15, align 4
+  %16 = getelementptr float, ptr %14, i32 32
+  %17 = bitcast ptr %16 to ptr
+  store <32 x float> <float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01>, ptr %17, align 4
   %18 = add i32 %13, 64
   %19 = icmp eq i32 %18, %10
   br i1 %19, label %20, label %12
@@ -45,12 +45,12 @@ define dso_local i32 @foo(float* nocapture %0, i32 %1) local_unnamed_addr #0 {
   ret i32 0
 
 23:                                               ; preds = %23, %6
-  %24 = phi float* [ %28, %23 ], [ %7, %6 ]
+  %24 = phi ptr [ %28, %23 ], [ %7, %6 ]
   %25 = phi i32 [ %26, %23 ], [ %8, %6 ]
-  store float 1.000000e+01, float* %24, align 4
+  store float 1.000000e+01, ptr %24, align 4
   %26 = add nuw nsw i32 %25, 1
   %27 = icmp eq i32 %26, %1
-  %28 = getelementptr float, float* %24, i32 1
+  %28 = getelementptr float, ptr %24, i32 1
   br i1 %27, label %22, label %23
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/autohvx/hfsplat.ll b/llvm/test/CodeGen/Hexagon/autohvx/hfsplat.ll
index 3a6847876ea5f..78ea32cc10403 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/hfsplat.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/hfsplat.ll
@@ -7,7 +7,7 @@
 target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
 target triple = "hexagon"
 ; Function Attrs: nofree norecurse nounwind writeonly
-define dso_local i32 @foo(half* nocapture %0, i32 %1) local_unnamed_addr #0 {
+define dso_local i32 @foo(ptr nocapture %0, i32 %1) local_unnamed_addr #0 {
   %3 = icmp sgt i32 %1, 0
   br i1 %3, label %4, label %22
 
@@ -16,23 +16,23 @@ define dso_local i32 @foo(half* nocapture %0, i32 %1) local_unnamed_addr #0 {
   br i1 %5, label %6, label %9
 
 6:                                                ; preds = %20, %4
-  %7 = phi half* [ %0, %4 ], [ %11, %20 ]
+  %7 = phi ptr [ %0, %4 ], [ %11, %20 ]
   %8 = phi i32 [ 0, %4 ], [ %10, %20 ]
   br label %23
 
 9:                                                ; preds = %4
   %10 = and i32 %1, -128
-  %11 = getelementptr half, half* %0, i32 %10
+  %11 = getelementptr half, ptr %0, i32 %10
   br label %12
 
 12:                                               ; preds = %12, %9
   %13 = phi i32 [ 0, %9 ], [ %18, %12 ]
-  %14 = getelementptr half, half* %0, i32 %13
-  %15 = bitcast half* %14 to <64 x half>*
-  store <64 x half> <half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170>, <64 x half>* %15, align 2
-  %16 = getelementptr half, half* %14, i32 64
-  %17 = bitcast half* %16 to <64 x half>*
-  store <64 x half> <half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170>, <64 x half>* %17, align 2
+  %14 = getelementptr half, ptr %0, i32 %13
+  %15 = bitcast ptr %14 to ptr
+  store <64 x half> <half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170>, ptr %15, align 2
+  %16 = getelementptr half, ptr %14, i32 64
+  %17 = bitcast ptr %16 to ptr
+  store <64 x half> <half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170>, ptr %17, align 2
   %18 = add i32 %13, 128
   %19 = icmp eq i32 %18, %10
   br i1 %19, label %20, label %12
@@ -45,12 +45,12 @@ define dso_local i32 @foo(half* nocapture %0, i32 %1) local_unnamed_addr #0 {
   ret i32 0
 
 23:                                               ; preds = %23, %6
-  %24 = phi half* [ %28, %23 ], [ %7, %6 ]
+  %24 = phi ptr [ %28, %23 ], [ %7, %6 ]
   %25 = phi i32 [ %26, %23 ], [ %8, %6 ]
-  store half 0xH4170, half* %24, align 2
+  store half 0xH4170, ptr %24, align 2
   %26 = add nuw nsw i32 %25, 1
   %27 = icmp eq i32 %26, %1
-  %28 = getelementptr half, half* %24, i32 1
+  %28 = getelementptr half, ptr %24, i32 1
   br i1 %27, label %22, label %23
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/cmpy-round.ll b/llvm/test/CodeGen/Hexagon/cmpy-round.ll
index f3da928ea83ad..5999cf19af717 100644
--- a/llvm/test/CodeGen/Hexagon/cmpy-round.ll
+++ b/llvm/test/CodeGen/Hexagon/cmpy-round.ll
@@ -19,14 +19,14 @@ define i32 @f0() #0 {
 b0:
   %v0 = alloca i32, align 4
   %v1 = alloca i32, align 4
-  store i32 0, i32* %v0
-  store i32 0, i32* %v1, align 4
+  store i32 0, ptr %v0
+  store i32 0, ptr %v1, align 4
   %v2 = call i32 @llvm.hexagon.A2.roundsat(i64 1)
-  store i32 %v2, i32* @g1, align 4
+  store i32 %v2, ptr @g1, align 4
   %v3 = call i32 @llvm.hexagon.M4.cmpyi.wh(i64 -2147483648, i32 -2147483648)
-  store i32 %v3, i32* @g0, align 4
+  store i32 %v3, ptr @g0, align 4
   %v4 = call i32 @llvm.hexagon.M4.cmpyr.whc(i64 2147483647, i32 2147483647)
-  store i32 %v4, i32* @g2, align 4
+  store i32 %v4, ptr @g2, align 4
   %v5 = call i32 @llvm.hexagon.M4.cmpyi.whc(i64 -2147483648, i32 -2147483648)
   ret i32 %v5
 }

diff  --git a/llvm/test/CodeGen/Hexagon/const-pool-tf.ll b/llvm/test/CodeGen/Hexagon/const-pool-tf.ll
index e67892537ef6e..b7a4b0aa58e8c 100644
--- a/llvm/test/CodeGen/Hexagon/const-pool-tf.ll
+++ b/llvm/test/CodeGen/Hexagon/const-pool-tf.ll
@@ -22,9 +22,9 @@ call_destructor.exit:                             ; preds = %entry
   %0 = shl nsw i32 %h.s0.x.x.us, 5
   %1 = add i32 %0, %tmp22.us
   %2 = add nsw i32 %1, 16
-  %3 = getelementptr inbounds i32, i32* null, i32 %2
-  %4 = bitcast i32* %3 to <16 x i32>*
-  store <16 x i32> zeroinitializer, <16 x i32>* %4, align 4, !tbaa !2
+  %3 = getelementptr inbounds i32, ptr null, i32 %2
+  %4 = bitcast ptr %3 to ptr
+  store <16 x i32> zeroinitializer, ptr %4, align 4, !tbaa !2
   %5 = add nuw nsw i32 %h.s0.x.x.us, 1
   br label %"for h.s0.x.x.us"
 }

diff  --git a/llvm/test/CodeGen/Hexagon/debug-prologue-loc.ll b/llvm/test/CodeGen/Hexagon/debug-prologue-loc.ll
index b9c9cee01c169..c70267c116184 100644
--- a/llvm/test/CodeGen/Hexagon/debug-prologue-loc.ll
+++ b/llvm/test/CodeGen/Hexagon/debug-prologue-loc.ll
@@ -12,16 +12,16 @@ define i32 @f0(i32 %a0, i32 %a1) #0 !dbg !5 {
 b0:
   %v0 = alloca i32, align 4
   %v1 = alloca i32, align 4
-  %v2 = alloca i32*, align 4
-  store i32 %a0, i32* %v0, align 4
-  call void @llvm.dbg.declare(metadata i32* %v0, metadata !9, metadata !DIExpression()), !dbg !10
-  store i32 %a1, i32* %v1, align 4
-  call void @llvm.dbg.declare(metadata i32* %v1, metadata !11, metadata !DIExpression()), !dbg !12
-  call void @llvm.dbg.declare(metadata i32** %v2, metadata !13, metadata !DIExpression()), !dbg !15
-  store i32* %v1, i32** %v2, align 4, !dbg !15
-  %v3 = load i32, i32* %v0, align 4, !dbg !16
-  %v4 = load i32*, i32** %v2, align 4, !dbg !17
-  %v5 = call i32 @f1(i32* %v4), !dbg !18
+  %v2 = alloca ptr, align 4
+  store i32 %a0, ptr %v0, align 4
+  call void @llvm.dbg.declare(metadata ptr %v0, metadata !9, metadata !DIExpression()), !dbg !10
+  store i32 %a1, ptr %v1, align 4
+  call void @llvm.dbg.declare(metadata ptr %v1, metadata !11, metadata !DIExpression()), !dbg !12
+  call void @llvm.dbg.declare(metadata ptr %v2, metadata !13, metadata !DIExpression()), !dbg !15
+  store ptr %v1, ptr %v2, align 4, !dbg !15
+  %v3 = load i32, ptr %v0, align 4, !dbg !16
+  %v4 = load ptr, ptr %v2, align 4, !dbg !17
+  %v5 = call i32 @f1(ptr %v4), !dbg !18
   %v6 = add nsw i32 %v3, %v5, !dbg !19
   ret i32 %v6, !dbg !20
 }
@@ -30,11 +30,11 @@ b0:
 declare void @llvm.dbg.declare(metadata, metadata, metadata) #1
 
 ; Function Attrs: nounwind
-define i32 @f1(i32* %a0) #0 !dbg !21 {
+define i32 @f1(ptr %a0) #0 !dbg !21 {
 b0:
-  %v0 = alloca i32*, align 4
-  store i32* %a0, i32** %v0, align 4
-  call void @llvm.dbg.declare(metadata i32** %v0, metadata !24, metadata !DIExpression()), !dbg !25
+  %v0 = alloca ptr, align 4
+  store ptr %a0, ptr %v0, align 4
+  call void @llvm.dbg.declare(metadata ptr %v0, metadata !24, metadata !DIExpression()), !dbg !25
   ret i32 0, !dbg !26
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/fixed-spill-mutable.ll b/llvm/test/CodeGen/Hexagon/fixed-spill-mutable.ll
index 03aa72bc5a8cb..5ffbc06f1bd27 100644
--- a/llvm/test/CodeGen/Hexagon/fixed-spill-mutable.ll
+++ b/llvm/test/CodeGen/Hexagon/fixed-spill-mutable.ll
@@ -20,34 +20,34 @@
 
 target triple = "hexagon"
 
-%struct.0 = type { i8*, %struct.1*, %struct.2*, %struct.0*, %struct.0* }
-%struct.1 = type { [60 x i8], i32, %struct.1* }
+%struct.0 = type { ptr, ptr, ptr, ptr, ptr }
+%struct.1 = type { [60 x i8], i32, ptr }
 %struct.2 = type { i8, i8, i8, i8, %union.anon }
-%union.anon = type { %struct.3* }
-%struct.3 = type { %struct.3*, %struct.2* }
+%union.anon = type { ptr }
+%struct.3 = type { ptr, ptr }
 
-@var = external hidden unnamed_addr global %struct.0*, align 4
+@var = external hidden unnamed_addr global ptr, align 4
 
-declare void @bar(i8*, i32) local_unnamed_addr #0
+declare void @bar(ptr, i32) local_unnamed_addr #0
 
 define void @foo() local_unnamed_addr #1 {
 entry:
-  %.pr = load %struct.0*, %struct.0** @var, align 4, !tbaa !1
-  %cmp2 = icmp eq %struct.0* %.pr, null
+  %.pr = load ptr, ptr @var, align 4, !tbaa !1
+  %cmp2 = icmp eq ptr %.pr, null
   br i1 %cmp2, label %while.end, label %while.body.preheader
 
 while.body.preheader:                             ; preds = %entry
   br label %while.body
 
 while.body:                                       ; preds = %while.body.preheader, %while.body
-  %0 = phi %struct.0* [ %4, %while.body ], [ %.pr, %while.body.preheader ]
-  %right = getelementptr inbounds %struct.0, %struct.0* %0, i32 0, i32 4
-  %1 = bitcast %struct.0** %right to i32*
-  %2 = load i32, i32* %1, align 4, !tbaa !5
-  %3 = bitcast %struct.0* %0 to i8*
-  tail call void @bar(i8* %3, i32 20) #1
-  store i32 %2, i32* bitcast (%struct.0** @var to i32*), align 4, !tbaa !1
-  %4 = inttoptr i32 %2 to %struct.0*
+  %0 = phi ptr [ %4, %while.body ], [ %.pr, %while.body.preheader ]
+  %right = getelementptr inbounds %struct.0, ptr %0, i32 0, i32 4
+  %1 = bitcast ptr %right to ptr
+  %2 = load i32, ptr %1, align 4, !tbaa !5
+  %3 = bitcast ptr %0 to ptr
+  tail call void @bar(ptr %3, i32 20) #1
+  store i32 %2, ptr @var, align 4, !tbaa !1
+  %4 = inttoptr i32 %2 to ptr
   %cmp = icmp eq i32 %2, 0
   br i1 %cmp, label %while.end.loopexit, label %while.body
 

diff  --git a/llvm/test/CodeGen/Hexagon/machine-sink-float-usr.mir b/llvm/test/CodeGen/Hexagon/machine-sink-float-usr.mir
index ba023bde92515..45621d98da3ef 100644
--- a/llvm/test/CodeGen/Hexagon/machine-sink-float-usr.mir
+++ b/llvm/test/CodeGen/Hexagon/machine-sink-float-usr.mir
@@ -20,10 +20,10 @@
   }
 
   ; Function Attrs: argmemonly mustprogress nofree nosync nounwind willreturn
-  declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture) #1
+  declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
 
   ; Function Attrs: argmemonly mustprogress nofree nosync nounwind willreturn
-  declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture) #1
+  declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
 
   ; Function Attrs: nounwind
   define dso_local i32 @main() local_unnamed_addr #2 {
@@ -31,16 +31,16 @@
     %a = alloca i32, align 4
     %b = alloca i32, align 4
     %c = alloca i32, align 4
-    %a.0.a.0.a.0.a.0..sroa_cast = bitcast i32* %a to i8*
-    call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %a.0.a.0.a.0.a.0..sroa_cast)
-    store volatile i32 -16777235, i32* %a, align 4, !tbaa !3
-    %b.0.b.0.b.0.b.0..sroa_cast = bitcast i32* %b to i8*
-    call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %b.0.b.0.b.0.b.0..sroa_cast)
-    store volatile i32 34, i32* %b, align 4, !tbaa !3
-    %c.0.c.0.c.0.c.0..sroa_cast = bitcast i32* %c to i8*
-    call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %c.0.c.0.c.0.c.0..sroa_cast)
-    store volatile i32 34, i32* %c, align 4, !tbaa !3
-    %b.0.b.0.b.0.b.0.29 = load volatile i32, i32* %b, align 4, !tbaa !3
+    %a.0.a.0.a.0.a.0..sroa_cast = bitcast ptr %a to ptr
+    call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %a.0.a.0.a.0.a.0..sroa_cast)
+    store volatile i32 -16777235, ptr %a, align 4, !tbaa !3
+    %b.0.b.0.b.0.b.0..sroa_cast = bitcast ptr %b to ptr
+    call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %b.0.b.0.b.0.b.0..sroa_cast)
+    store volatile i32 34, ptr %b, align 4, !tbaa !3
+    %c.0.c.0.c.0.c.0..sroa_cast = bitcast ptr %c to ptr
+    call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %c.0.c.0.c.0.c.0..sroa_cast)
+    store volatile i32 34, ptr %c, align 4, !tbaa !3
+    %b.0.b.0.b.0.b.0.29 = load volatile i32, ptr %b, align 4, !tbaa !3
     %cmp30 = icmp sgt i32 %b.0.b.0.b.0.b.0.29, 0
     br i1 %cmp30, label %for.body, label %if.end
 
@@ -52,44 +52,44 @@
 
   for.body:                                         ; preds = %entry, %for.body
     %i.031 = phi i32 [ %inc4, %for.body ], [ 0, %entry ]
-    %c.0.c.0.c.0.c.0. = load volatile i32, i32* %c, align 4, !tbaa !3
+    %c.0.c.0.c.0.c.0. = load volatile i32, ptr %c, align 4, !tbaa !3
     %inc = add nsw i32 %c.0.c.0.c.0.c.0., 1
-    store volatile i32 %inc, i32* %c, align 4, !tbaa !3
+    store volatile i32 %inc, ptr %c, align 4, !tbaa !3
     %call = tail call i32 @feclearexcept(i32 31) #5
-    %a.0.a.0.a.0.a.0. = load volatile i32, i32* %a, align 4, !tbaa !3
+    %a.0.a.0.a.0.a.0. = load volatile i32, ptr %a, align 4, !tbaa !3
     %call2 = tail call i32 @fetestexcept(i32 31) #5
-    %call3 = tail call i32 (i8*, ...) @printf(i8* nonnull dereferenceable(1) getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i32 %call2) #5
+    %call3 = tail call i32 (ptr, ...) @printf(ptr nonnull dereferenceable(1) @.str, i32 %call2) #5
     %inc4 = add nuw nsw i32 %i.031, 1
-    %b.0.b.0.b.0.b.0. = load volatile i32, i32* %b, align 4, !tbaa !3
+    %b.0.b.0.b.0.b.0. = load volatile i32, ptr %b, align 4, !tbaa !3
     %cmp = icmp slt i32 %inc4, %b.0.b.0.b.0.b.0.
     br i1 %cmp, label %for.body, label %for.cond.for.cond.cleanup_crit_edge, !llvm.loop !7
 
   if.then:                                          ; preds = %for.cond.for.cond.cleanup_crit_edge
-    %a.0.a.0.a.0.a.0.23 = load volatile i32, i32* %a, align 4, !tbaa !3
-    %b.0.b.0.b.0.b.0.20 = load volatile i32, i32* %b, align 4, !tbaa !3
+    %a.0.a.0.a.0.a.0.23 = load volatile i32, ptr %a, align 4, !tbaa !3
+    %b.0.b.0.b.0.b.0.20 = load volatile i32, ptr %b, align 4, !tbaa !3
     %add = add nsw i32 %b.0.b.0.b.0.b.0.20, %a.0.a.0.a.0.a.0.23
-    %c.0.c.0.c.0.c.0.17 = load volatile i32, i32* %c, align 4, !tbaa !3
+    %c.0.c.0.c.0.c.0.17 = load volatile i32, ptr %c, align 4, !tbaa !3
     %add7 = add nsw i32 %add, %c.0.c.0.c.0.c.0.17
     br label %cleanup
 
   if.end:                                           ; preds = %entry, %for.cond.for.cond.cleanup_crit_edge
-    %a.0.a.0.a.0.a.0.24 = load volatile i32, i32* %a, align 4, !tbaa !3
-    %b.0.b.0.b.0.b.0.21 = load volatile i32, i32* %b, align 4, !tbaa !3
+    %a.0.a.0.a.0.a.0.24 = load volatile i32, ptr %a, align 4, !tbaa !3
+    %b.0.b.0.b.0.b.0.21 = load volatile i32, ptr %b, align 4, !tbaa !3
     %mul.neg = mul i32 %b.0.b.0.b.0.b.0.21, -6
     %sub = add i32 %mul.neg, %a.0.a.0.a.0.a.0.24
-    %c.0.c.0.c.0.c.0.18 = load volatile i32, i32* %c, align 4, !tbaa !3
+    %c.0.c.0.c.0.c.0.18 = load volatile i32, ptr %c, align 4, !tbaa !3
     %mul8 = mul nsw i32 %c.0.c.0.c.0.c.0.18, 3
     %add9 = add nsw i32 %sub, %mul8
     br label %cleanup
 
   cleanup:                                          ; preds = %if.end, %if.then
     %retval.0 = phi i32 [ %add7, %if.then ], [ %add9, %if.end ]
-    %1 = bitcast i32* %c to i8*
-    %2 = bitcast i32* %b to i8*
-    %3 = bitcast i32* %a to i8*
-    call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %1)
-    call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %2)
-    call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %3)
+    %1 = bitcast ptr %c to ptr
+    %2 = bitcast ptr %b to ptr
+    %3 = bitcast ptr %a to ptr
+    call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %1)
+    call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %2)
+    call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %3)
     ret i32 %retval.0
   }
 
@@ -98,7 +98,7 @@
   declare dso_local i32 @fetestexcept(i32) local_unnamed_addr #3
 
   ; Function Attrs: nofree nounwind
-  declare dso_local noundef i32 @printf(i8* nocapture noundef readonly, ...) local_unnamed_addr #4
+  declare dso_local noundef i32 @printf(ptr nocapture noundef readonly, ...) local_unnamed_addr #4
 
   attributes #0 = { mustprogress nofree norecurse nosync nounwind readnone willreturn "frame-pointer"="all" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="hexagonv68" "target-features"="+v68,-long-calls" }
   attributes #1 = { argmemonly mustprogress nofree nosync nounwind willreturn }

diff  --git a/llvm/test/CodeGen/Hexagon/memcpy-likely-aligned.ll b/llvm/test/CodeGen/Hexagon/memcpy-likely-aligned.ll
index c3b62c41e5830..136e41509b731 100644
--- a/llvm/test/CodeGen/Hexagon/memcpy-likely-aligned.ll
+++ b/llvm/test/CodeGen/Hexagon/memcpy-likely-aligned.ll
@@ -20,7 +20,7 @@ entry:
   %0 = load ptr, ptr @t, align 4
   store ptr %0, ptr @q, align 4
   %1 = load ptr, ptr @q, align 4
-  call void @llvm.memcpy.p0.p0.i32(ptr align 4 %1, ptr align 4 getelementptr inbounds ({ <{ { %struct.e, { i8, i8, i8, [5 x i8] }, %struct.e }, { %struct.e, { i8, i8, i8, [5 x i8] }, %struct.e } }> }, ptr @y, i32 0, i32 0, i32 0, i32 0, i32 0), i32 32, i1 false)
+  call void @llvm.memcpy.p0.p0.i32(ptr align 4 %1, ptr align 4 @y, i32 32, i1 false)
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/swp-carried-dep1.mir b/llvm/test/CodeGen/Hexagon/swp-carried-dep1.mir
index 853d7f1bdcd60..c333f1b7f31df 100644
--- a/llvm/test/CodeGen/Hexagon/swp-carried-dep1.mir
+++ b/llvm/test/CodeGen/Hexagon/swp-carried-dep1.mir
@@ -14,37 +14,37 @@
 
   %struct.A = type { i16, i16 }
 
-  define i32 @test(%struct.A* noalias nocapture %s, i16* noalias nocapture readonly %r, i32 %n) {
+  define i32 @test(ptr noalias nocapture %s, ptr noalias nocapture readonly %r, i32 %n) {
   entry:
     %cmp19 = icmp eq i32 %n, 2
     br i1 %cmp19, label %for.end, label %for.body.preheader
 
   for.body.preheader:
     %0 = add i32 %n, -2
-    %cgep = getelementptr %struct.A, %struct.A* %s, i32 2, i32 1
-    %scevgep1 = bitcast i16* %cgep to %struct.A*
-    %cgep9 = getelementptr i16, i16* %r, i32 2
+    %cgep = getelementptr %struct.A, ptr %s, i32 2, i32 1
+    %scevgep1 = bitcast ptr %cgep to ptr
+    %cgep9 = getelementptr i16, ptr %r, i32 2
     br label %for.body
 
   for.body:
-    %lsr.iv7 = phi i16* [ %cgep9, %for.body.preheader ], [ %cgep12, %for.body ]
-    %lsr.iv2 = phi %struct.A* [ %scevgep1, %for.body.preheader ], [ %cgep11, %for.body ]
+    %lsr.iv7 = phi ptr [ %cgep9, %for.body.preheader ], [ %cgep12, %for.body ]
+    %lsr.iv2 = phi ptr [ %scevgep1, %for.body.preheader ], [ %cgep11, %for.body ]
     %lsr.iv = phi i32 [ %0, %for.body.preheader ], [ %lsr.iv.next, %for.body ]
     %sum.020 = phi i32 [ %add7, %for.body ], [ 0, %for.body.preheader ]
-    %lsr.iv24 = bitcast %struct.A* %lsr.iv2 to i16*
-    %1 = load i16, i16* %lsr.iv7, align 2
+    %lsr.iv24 = bitcast ptr %lsr.iv2 to ptr
+    %1 = load i16, ptr %lsr.iv7, align 2
     %conv = sext i16 %1 to i32
-    %cgep10 = getelementptr i16, i16* %lsr.iv24, i32 -4
-    %2 = load i16, i16* %cgep10, align 2
+    %cgep10 = getelementptr i16, ptr %lsr.iv24, i32 -4
+    %2 = load i16, ptr %cgep10, align 2
     %conv2 = sext i16 %2 to i32
     %add = add i16 %1, 10
-    store i16 %add, i16* %lsr.iv24, align 2
+    store i16 %add, ptr %lsr.iv24, align 2
     %add6 = add i32 %sum.020, %conv
     %add7 = add i32 %add6, %conv2
     %lsr.iv.next = add i32 %lsr.iv, -1
     %cmp = icmp eq i32 %lsr.iv.next, 0
-    %cgep11 = getelementptr %struct.A, %struct.A* %lsr.iv2, i32 1
-    %cgep12 = getelementptr i16, i16* %lsr.iv7, i32 1
+    %cgep11 = getelementptr %struct.A, ptr %lsr.iv2, i32 1
+    %cgep12 = getelementptr i16, ptr %lsr.iv7, i32 1
     br i1 %cmp, label %for.end, label %for.body
 
   for.end:

diff  --git a/llvm/test/CodeGen/Hexagon/swp-carried-dep2.mir b/llvm/test/CodeGen/Hexagon/swp-carried-dep2.mir
index b910d5adb3694..16ff5999e29ca 100644
--- a/llvm/test/CodeGen/Hexagon/swp-carried-dep2.mir
+++ b/llvm/test/CodeGen/Hexagon/swp-carried-dep2.mir
@@ -17,18 +17,18 @@
     br label %b3
 
   b3:
-    %lsr.iv = phi [9 x i32]* [ %0, %b3 ], [ undef, %b0 ]
+    %lsr.iv = phi ptr [ %0, %b3 ], [ undef, %b0 ]
     %v0 = phi i32 [ %v8, %b3 ], [ 7, %b0 ]
     %v1 = phi i32 [ %v6, %b3 ], [ undef, %b0 ]
     %v2 = phi i32 [ %v1, %b3 ], [ undef, %b0 ]
-    %lsr.iv1 = bitcast [9 x i32]* %lsr.iv to i32*
-    %cgep = getelementptr i32, i32* %lsr.iv1, i32 -2
-    %v6 = load i32, i32* %cgep, align 4
+    %lsr.iv1 = bitcast ptr %lsr.iv to ptr
+    %cgep = getelementptr i32, ptr %lsr.iv1, i32 -2
+    %v6 = load i32, ptr %cgep, align 4
     %v7 = tail call i32 @llvm.hexagon.A2.subsat(i32 %v2, i32 %v6)
-    store i32 %v7, i32* %lsr.iv1, align 4
+    store i32 %v7, ptr %lsr.iv1, align 4
     %v8 = add i32 %v0, -1
-    %cgep3 = getelementptr [9 x i32], [9 x i32]* %lsr.iv, i32 0, i32 -1
-    %0 = bitcast i32* %cgep3 to [9 x i32]*
+    %cgep3 = getelementptr [9 x i32], ptr %lsr.iv, i32 0, i32 -1
+    %0 = bitcast ptr %cgep3 to ptr
     %v9 = icmp sgt i32 %v8, 1
     br i1 %v9, label %b3, label %b4
 
@@ -37,7 +37,7 @@
   }
 
   declare i32 @llvm.hexagon.A2.subsat(i32, i32) #0
-  declare void @llvm.stackprotector(i8*, i8**) #1
+  declare void @llvm.stackprotector(ptr, ptr) #1
 
 ...
 ---

diff  --git a/llvm/test/CodeGen/Hexagon/swp-memrefs-epilog.ll b/llvm/test/CodeGen/Hexagon/swp-memrefs-epilog.ll
index 20e39dd08fd72..adbcfa5158c54 100644
--- a/llvm/test/CodeGen/Hexagon/swp-memrefs-epilog.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-memrefs-epilog.ll
@@ -28,74 +28,74 @@ define i32 @f0() unnamed_addr {
 b0:
   %v0 = alloca %s.0, align 4
   %v1 = alloca %s.4, align 4
-  %v2 = bitcast %s.0* %v0 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 36, i8* %v2)
-  %v3 = getelementptr inbounds %s.0, %s.0* %v0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  store float 0x3FEFFF7160000000, float* %v3, align 4
-  %v4 = getelementptr inbounds %s.0, %s.0* %v0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 1
-  store float 0xBF87867F00000000, float* %v4, align 4
-  %v5 = getelementptr inbounds %s.0, %s.0* %v0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 2
-  store float 0xBF6185CEE0000000, float* %v5, align 4
-  %v6 = getelementptr inbounds %s.0, %s.0* %v0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 3
-  store float 0x3F8780BAA0000000, float* %v6, align 4
-  %v7 = getelementptr inbounds %s.0, %s.0* %v0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 4
-  store float 0x3FEFFF5C60000000, float* %v7, align 4
-  %v8 = getelementptr inbounds %s.0, %s.0* %v0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 5
-  store float 0xBF74717160000000, float* %v8, align 4
-  %v9 = getelementptr inbounds %s.0, %s.0* %v0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 6
-  store float 0x3F61FF7160000000, float* %v9, align 4
-  %v10 = getelementptr inbounds %s.0, %s.0* %v0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 7
-  store float 0x3F74573A80000000, float* %v10, align 4
-  %v11 = getelementptr inbounds %s.0, %s.0* %v0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 8
-  store float 0x3FEFFFE080000000, float* %v11, align 4
-  %v12 = bitcast %s.4* %v1 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 12, i8* %v12)
+  %v2 = bitcast ptr %v0 to ptr
+  call void @llvm.lifetime.start.p0(i64 36, ptr %v2)
+  %v3 = getelementptr inbounds %s.0, ptr %v0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0
+  store float 0x3FEFFF7160000000, ptr %v3, align 4
+  %v4 = getelementptr inbounds %s.0, ptr %v0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 1
+  store float 0xBF87867F00000000, ptr %v4, align 4
+  %v5 = getelementptr inbounds %s.0, ptr %v0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 2
+  store float 0xBF6185CEE0000000, ptr %v5, align 4
+  %v6 = getelementptr inbounds %s.0, ptr %v0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 3
+  store float 0x3F8780BAA0000000, ptr %v6, align 4
+  %v7 = getelementptr inbounds %s.0, ptr %v0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 4
+  store float 0x3FEFFF5C60000000, ptr %v7, align 4
+  %v8 = getelementptr inbounds %s.0, ptr %v0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 5
+  store float 0xBF74717160000000, ptr %v8, align 4
+  %v9 = getelementptr inbounds %s.0, ptr %v0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 6
+  store float 0x3F61FF7160000000, ptr %v9, align 4
+  %v10 = getelementptr inbounds %s.0, ptr %v0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 7
+  store float 0x3F74573A80000000, ptr %v10, align 4
+  %v11 = getelementptr inbounds %s.0, ptr %v0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 8
+  store float 0x3FEFFFE080000000, ptr %v11, align 4
+  %v12 = bitcast ptr %v1 to ptr
+  call void @llvm.lifetime.start.p0(i64 12, ptr %v12)
   br label %b1
 
 b1:                                               ; preds = %b1, %b0
   %v13 = phi i32 [ 0, %b0 ], [ %v29, %b1 ]
   %v14 = mul nuw nsw i32 %v13, 3
-  %v15 = getelementptr inbounds %s.0, %s.0* %v0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 %v14
-  %v16 = load float, float* %v15, align 4
+  %v15 = getelementptr inbounds %s.0, ptr %v0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 %v14
+  %v16 = load float, ptr %v15, align 4
   %v17 = fmul float %v16, 0x3FE7B2B120000000
   %v18 = add nuw nsw i32 %v14, 1
-  %v19 = getelementptr inbounds %s.0, %s.0* %v0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 %v18
-  %v20 = load float, float* %v19, align 4
+  %v19 = getelementptr inbounds %s.0, ptr %v0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 %v18
+  %v20 = load float, ptr %v19, align 4
   %v21 = fmul float %v20, 0x3FDA8BC9C0000000
   %v22 = fsub float %v21, %v17
   %v23 = add nuw nsw i32 %v14, 2
-  %v24 = getelementptr inbounds %s.0, %s.0* %v0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 %v23
-  %v25 = load float, float* %v24, align 4
+  %v24 = getelementptr inbounds %s.0, ptr %v0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 %v23
+  %v25 = load float, ptr %v24, align 4
   %v26 = fmul float %v25, 0x40030D6700000000
   %v27 = fadd float %v22, %v26
-  %v28 = getelementptr inbounds %s.4, %s.4* %v1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 %v13
-  store float %v27, float* %v28, align 4
+  %v28 = getelementptr inbounds %s.4, ptr %v1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 %v13
+  store float %v27, ptr %v28, align 4
   %v29 = add nuw nsw i32 %v13, 1
   %v30 = icmp eq i32 %v29, 3
   br i1 %v30, label %b2, label %b1
 
 b2:                                               ; preds = %b1
-  %v31 = getelementptr inbounds %s.4, %s.4* %v1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %v32 = load float, float* %v31, align 4
+  %v31 = getelementptr inbounds %s.4, ptr %v1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0
+  %v32 = load float, ptr %v31, align 4
   %v33 = fpext float %v32 to double
-  %v34 = getelementptr inbounds %s.4, %s.4* %v1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 1
-  %v35 = load float, float* %v34, align 4
+  %v34 = getelementptr inbounds %s.4, ptr %v1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 1
+  %v35 = load float, ptr %v34, align 4
   %v36 = fpext float %v35 to double
-  %v37 = getelementptr inbounds %s.4, %s.4* %v1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 2
-  %v38 = load float, float* %v37, align 4
+  %v37 = getelementptr inbounds %s.4, ptr %v1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 2
+  %v38 = load float, ptr %v37, align 4
   %v39 = fpext float %v38 to double
-  %v40 = tail call i32 (i8*, ...) @f1(i8* getelementptr inbounds ([29 x i8], [29 x i8]* @g0, i32 0, i32 0), double %v33, double %v36, double %v39)
-  call void @llvm.lifetime.end.p0i8(i64 12, i8* nonnull %v12)
-  call void @llvm.lifetime.end.p0i8(i64 36, i8* nonnull %v2)
+  %v40 = tail call i32 (ptr, ...) @f1(ptr @g0, double %v33, double %v36, double %v39)
+  call void @llvm.lifetime.end.p0(i64 12, ptr nonnull %v12)
+  call void @llvm.lifetime.end.p0(i64 36, ptr nonnull %v2)
   ret i32 0
 }
 
-declare i32 @f1(i8* nocapture readonly, ...) local_unnamed_addr
+declare i32 @f1(ptr nocapture readonly, ...) local_unnamed_addr
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #0
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #0
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #0
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #0
 
 attributes #0 = { argmemonly nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/swp-new-phi.ll b/llvm/test/CodeGen/Hexagon/swp-new-phi.ll
index d3c1058fe36cc..f877722cdf780 100644
--- a/llvm/test/CodeGen/Hexagon/swp-new-phi.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-new-phi.ll
@@ -10,26 +10,26 @@
 ; CHECK: endloop0
 
 ; Function Attrs: argmemonly nounwind
-declare i8* @llvm.hexagon.circ.sthhi(i8*, i32, i32, i32) #1
+declare ptr @llvm.hexagon.circ.sthhi(ptr, i32, i32, i32) #1
 
 ; Function Attrs: nounwind optsize
-define signext i16 @f0(i16* %a0, i16* %a1, i16 signext %a2, i16 signext %a3) #0 {
+define signext i16 @f0(ptr %a0, ptr %a1, i16 signext %a2, i16 signext %a3) #0 {
 b0:
   br label %b1
 
 b1:                                               ; preds = %b1, %b0
-  %v0 = phi i16* [ %v10, %b1 ], [ %a1, %b0 ]
+  %v0 = phi ptr [ %v10, %b1 ], [ %a1, %b0 ]
   %v1 = phi i32 [ %v13, %b1 ], [ 1, %b0 ]
   %v2 = phi i16 [ %v12, %b1 ], [ 0, %b0 ]
-  %v3 = bitcast i16* %v0 to i8*
+  %v3 = bitcast ptr %v0 to ptr
   %v4 = add nsw i32 %v1, 10
-  %v5 = getelementptr inbounds i16, i16* %a0, i32 %v4
-  %v6 = load i16, i16* %v5, align 2, !tbaa !0
+  %v5 = getelementptr inbounds i16, ptr %a0, i32 %v4
+  %v6 = load i16, ptr %v5, align 2, !tbaa !0
   %v7 = sext i16 %v6 to i32
   %v8 = add nsw i32 %v7, 40000
-  %v9 = tail call i8* @llvm.hexagon.circ.sthhi(i8* %v3, i32 %v8, i32 117441022, i32 2)
-  %v10 = bitcast i8* %v9 to i16*
-  %v11 = load i16, i16* %v10, align 2, !tbaa !0
+  %v9 = tail call ptr @llvm.hexagon.circ.sthhi(ptr %v3, i32 %v8, i32 117441022, i32 2)
+  %v10 = bitcast ptr %v9 to ptr
+  %v11 = load i16, ptr %v10, align 2, !tbaa !0
   %v12 = add i16 %v11, %v2
   %v13 = add i32 %v1, 1
   %v14 = icmp eq i32 %v13, 1000

diff  --git a/llvm/test/CodeGen/Hexagon/v5_insns.ll b/llvm/test/CodeGen/Hexagon/v5_insns.ll
index 4cfd3a2cbcfc5..3e9d1e98cf368 100644
--- a/llvm/test/CodeGen/Hexagon/v5_insns.ll
+++ b/llvm/test/CodeGen/Hexagon/v5_insns.ll
@@ -10,8 +10,8 @@ b0:
   %v1 = fptosi double %a0 to i64
   %v2 = tail call i32 @llvm.hexagon.M4.cmpyi.whc(i64 %v1, i32 512)
   %v3 = trunc i32 %v2 to i8
-  store volatile i8 %v3, i8* %v0, align 1
-  %v4 = load volatile i8, i8* %v0, align 1
+  store volatile i8 %v3, ptr %v0, align 1
+  %v4 = load volatile i8, ptr %v0, align 1
   %v5 = zext i8 %v4 to i32
   ret i32 %v5
 }
@@ -27,8 +27,8 @@ b0:
   %v1 = fptosi double %a0 to i64
   %v2 = tail call i32 @llvm.hexagon.M4.cmpyr.whc(i64 %v1, i32 512)
   %v3 = trunc i32 %v2 to i8
-  store volatile i8 %v3, i8* %v0, align 1
-  %v4 = load volatile i8, i8* %v0, align 1
+  store volatile i8 %v3, ptr %v0, align 1
+  %v4 = load volatile i8, ptr %v0, align 1
   %v5 = zext i8 %v4 to i32
   ret i32 %v5
 }
@@ -44,8 +44,8 @@ b0:
   %v1 = fptosi double %a0 to i64
   %v2 = tail call i32 @llvm.hexagon.S5.popcountp(i64 %v1)
   %v3 = trunc i32 %v2 to i8
-  store volatile i8 %v3, i8* %v0, align 1
-  %v4 = load volatile i8, i8* %v0, align 1
+  store volatile i8 %v3, ptr %v0, align 1
+  %v4 = load volatile i8, ptr %v0, align 1
   %v5 = zext i8 %v4 to i32
   ret i32 %v5
 }
@@ -60,8 +60,8 @@ b0:
   %v0 = alloca i8, align 1
   %v1 = tail call i32 @llvm.hexagon.F2.sfclass(float %a0, i32 3)
   %v2 = trunc i32 %v1 to i8
-  store volatile i8 %v2, i8* %v0, align 1
-  %v3 = load volatile i8, i8* %v0, align 1
+  store volatile i8 %v2, ptr %v0, align 1
+  %v3 = load volatile i8, ptr %v0, align 1
   %v4 = zext i8 %v3 to i32
   ret i32 %v4
 }
@@ -77,8 +77,8 @@ b0:
   %v1 = fptosi float %a0 to i64
   %v2 = tail call i32 @llvm.hexagon.S5.asrhub.sat(i64 %v1, i32 3)
   %v3 = trunc i32 %v2 to i8
-  store volatile i8 %v3, i8* %v0, align 1
-  %v4 = load volatile i8, i8* %v0, align 1
+  store volatile i8 %v3, ptr %v0, align 1
+  %v4 = load volatile i8, ptr %v0, align 1
   %v5 = zext i8 %v4 to i32
   ret i32 %v5
 }

diff  --git a/llvm/test/CodeGen/Hexagon/v60Vasr.ll b/llvm/test/CodeGen/Hexagon/v60Vasr.ll
index dd309f6764615..8f53c3e59acce 100644
--- a/llvm/test/CodeGen/Hexagon/v60Vasr.ll
+++ b/llvm/test/CodeGen/Hexagon/v60Vasr.ll
@@ -5,40 +5,40 @@
 target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a:0-n16:32"
 target triple = "hexagon-unknown--elf"
 
-%struct.buffer_t = type { i64, i8*, [4 x i32], [4 x i32], [4 x i32], i32, i8, i8, [6 x i8] }
+%struct.buffer_t = type { i64, ptr, [4 x i32], [4 x i32], [4 x i32], i32, i8, i8, [6 x i8] }
 
 ; Function Attrs: norecurse nounwind
-define i32 @__test_vasr(%struct.buffer_t* noalias nocapture %f.buffer, %struct.buffer_t* noalias nocapture %g.buffer, %struct.buffer_t* noalias nocapture %res.buffer) #0 {
+define i32 @__test_vasr(ptr noalias nocapture %f.buffer, ptr noalias nocapture %g.buffer, ptr noalias nocapture %res.buffer) #0 {
 entry:
-  %buf_host = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %f.buffer, i32 0, i32 1
-  %f.host = load i8*, i8** %buf_host, align 4
-  %buf_dev = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %f.buffer, i32 0, i32 0
-  %f.dev = load i64, i64* %buf_dev, align 8
-  %0 = icmp eq i8* %f.host, null
+  %buf_host = getelementptr inbounds %struct.buffer_t, ptr %f.buffer, i32 0, i32 1
+  %f.host = load ptr, ptr %buf_host, align 4
+  %buf_dev = getelementptr inbounds %struct.buffer_t, ptr %f.buffer, i32 0, i32 0
+  %f.dev = load i64, ptr %buf_dev, align 8
+  %0 = icmp eq ptr %f.host, null
   %1 = icmp eq i64 %f.dev, 0
   %f.host_and_dev_are_null = and i1 %0, %1
-  %buf_min = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %f.buffer, i32 0, i32 4, i32 0
-  %f.min.0 = load i32, i32* %buf_min, align 4
-  %buf_host10 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %g.buffer, i32 0, i32 1
-  %g.host = load i8*, i8** %buf_host10, align 4
-  %buf_dev11 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %g.buffer, i32 0, i32 0
-  %g.dev = load i64, i64* %buf_dev11, align 8
-  %2 = icmp eq i8* %g.host, null
+  %buf_min = getelementptr inbounds %struct.buffer_t, ptr %f.buffer, i32 0, i32 4, i32 0
+  %f.min.0 = load i32, ptr %buf_min, align 4
+  %buf_host10 = getelementptr inbounds %struct.buffer_t, ptr %g.buffer, i32 0, i32 1
+  %g.host = load ptr, ptr %buf_host10, align 4
+  %buf_dev11 = getelementptr inbounds %struct.buffer_t, ptr %g.buffer, i32 0, i32 0
+  %g.dev = load i64, ptr %buf_dev11, align 8
+  %2 = icmp eq ptr %g.host, null
   %3 = icmp eq i64 %g.dev, 0
   %g.host_and_dev_are_null = and i1 %2, %3
-  %buf_min22 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %g.buffer, i32 0, i32 4, i32 0
-  %g.min.0 = load i32, i32* %buf_min22, align 4
-  %buf_host27 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %res.buffer, i32 0, i32 1
-  %res.host = load i8*, i8** %buf_host27, align 4
-  %buf_dev28 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %res.buffer, i32 0, i32 0
-  %res.dev = load i64, i64* %buf_dev28, align 8
-  %4 = icmp eq i8* %res.host, null
+  %buf_min22 = getelementptr inbounds %struct.buffer_t, ptr %g.buffer, i32 0, i32 4, i32 0
+  %g.min.0 = load i32, ptr %buf_min22, align 4
+  %buf_host27 = getelementptr inbounds %struct.buffer_t, ptr %res.buffer, i32 0, i32 1
+  %res.host = load ptr, ptr %buf_host27, align 4
+  %buf_dev28 = getelementptr inbounds %struct.buffer_t, ptr %res.buffer, i32 0, i32 0
+  %res.dev = load i64, ptr %buf_dev28, align 8
+  %4 = icmp eq ptr %res.host, null
   %5 = icmp eq i64 %res.dev, 0
   %res.host_and_dev_are_null = and i1 %4, %5
-  %buf_extent31 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %res.buffer, i32 0, i32 2, i32 0
-  %res.extent.0 = load i32, i32* %buf_extent31, align 4
-  %buf_min39 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %res.buffer, i32 0, i32 4, i32 0
-  %res.min.0 = load i32, i32* %buf_min39, align 4
+  %buf_extent31 = getelementptr inbounds %struct.buffer_t, ptr %res.buffer, i32 0, i32 2, i32 0
+  %res.extent.0 = load i32, ptr %buf_extent31, align 4
+  %buf_min39 = getelementptr inbounds %struct.buffer_t, ptr %res.buffer, i32 0, i32 4, i32 0
+  %res.min.0 = load i32, ptr %buf_min39, align 4
   %6 = add nsw i32 %res.extent.0, -1
   %7 = and i32 %6, -64
   %8 = add i32 %res.min.0, 63
@@ -54,95 +54,95 @@ entry:
   br i1 %f.host_and_dev_are_null, label %true_bb, label %after_bb
 
 true_bb:                                          ; preds = %entry
-  %buf_elem_size44 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %f.buffer, i32 0, i32 5
-  store i32 1, i32* %buf_elem_size44, align 4
-  store i32 %16, i32* %buf_min, align 4
+  %buf_elem_size44 = getelementptr inbounds %struct.buffer_t, ptr %f.buffer, i32 0, i32 5
+  store i32 1, ptr %buf_elem_size44, align 4
+  store i32 %16, ptr %buf_min, align 4
   %17 = add nsw i32 %f.extent.0.required.s, 1
-  %buf_extent46 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %f.buffer, i32 0, i32 2, i32 0
-  store i32 %17, i32* %buf_extent46, align 4
-  %buf_stride47 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %f.buffer, i32 0, i32 3, i32 0
-  store i32 1, i32* %buf_stride47, align 4
-  %buf_min48 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %f.buffer, i32 0, i32 4, i32 1
-  store i32 0, i32* %buf_min48, align 4
-  %buf_extent49 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %f.buffer, i32 0, i32 2, i32 1
-  store i32 0, i32* %buf_extent49, align 4
-  %buf_stride50 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %f.buffer, i32 0, i32 3, i32 1
-  store i32 0, i32* %buf_stride50, align 4
-  %buf_min51 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %f.buffer, i32 0, i32 4, i32 2
-  store i32 0, i32* %buf_min51, align 4
-  %buf_extent52 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %f.buffer, i32 0, i32 2, i32 2
-  store i32 0, i32* %buf_extent52, align 4
-  %buf_stride53 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %f.buffer, i32 0, i32 3, i32 2
-  store i32 0, i32* %buf_stride53, align 4
-  %buf_min54 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %f.buffer, i32 0, i32 4, i32 3
-  store i32 0, i32* %buf_min54, align 4
-  %buf_extent55 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %f.buffer, i32 0, i32 2, i32 3
-  store i32 0, i32* %buf_extent55, align 4
-  %buf_stride56 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %f.buffer, i32 0, i32 3, i32 3
-  store i32 0, i32* %buf_stride56, align 4
+  %buf_extent46 = getelementptr inbounds %struct.buffer_t, ptr %f.buffer, i32 0, i32 2, i32 0
+  store i32 %17, ptr %buf_extent46, align 4
+  %buf_stride47 = getelementptr inbounds %struct.buffer_t, ptr %f.buffer, i32 0, i32 3, i32 0
+  store i32 1, ptr %buf_stride47, align 4
+  %buf_min48 = getelementptr inbounds %struct.buffer_t, ptr %f.buffer, i32 0, i32 4, i32 1
+  store i32 0, ptr %buf_min48, align 4
+  %buf_extent49 = getelementptr inbounds %struct.buffer_t, ptr %f.buffer, i32 0, i32 2, i32 1
+  store i32 0, ptr %buf_extent49, align 4
+  %buf_stride50 = getelementptr inbounds %struct.buffer_t, ptr %f.buffer, i32 0, i32 3, i32 1
+  store i32 0, ptr %buf_stride50, align 4
+  %buf_min51 = getelementptr inbounds %struct.buffer_t, ptr %f.buffer, i32 0, i32 4, i32 2
+  store i32 0, ptr %buf_min51, align 4
+  %buf_extent52 = getelementptr inbounds %struct.buffer_t, ptr %f.buffer, i32 0, i32 2, i32 2
+  store i32 0, ptr %buf_extent52, align 4
+  %buf_stride53 = getelementptr inbounds %struct.buffer_t, ptr %f.buffer, i32 0, i32 3, i32 2
+  store i32 0, ptr %buf_stride53, align 4
+  %buf_min54 = getelementptr inbounds %struct.buffer_t, ptr %f.buffer, i32 0, i32 4, i32 3
+  store i32 0, ptr %buf_min54, align 4
+  %buf_extent55 = getelementptr inbounds %struct.buffer_t, ptr %f.buffer, i32 0, i32 2, i32 3
+  store i32 0, ptr %buf_extent55, align 4
+  %buf_stride56 = getelementptr inbounds %struct.buffer_t, ptr %f.buffer, i32 0, i32 3, i32 3
+  store i32 0, ptr %buf_stride56, align 4
   br label %after_bb
 
 after_bb:                                         ; preds = %true_bb, %entry
   br i1 %g.host_and_dev_are_null, label %true_bb57, label %after_bb59
 
 true_bb57:                                        ; preds = %after_bb
-  %buf_elem_size60 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %g.buffer, i32 0, i32 5
-  store i32 1, i32* %buf_elem_size60, align 4
-  store i32 %16, i32* %buf_min22, align 4
+  %buf_elem_size60 = getelementptr inbounds %struct.buffer_t, ptr %g.buffer, i32 0, i32 5
+  store i32 1, ptr %buf_elem_size60, align 4
+  store i32 %16, ptr %buf_min22, align 4
   %18 = add nsw i32 %f.extent.0.required.s, 1
-  %buf_extent62 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %g.buffer, i32 0, i32 2, i32 0
-  store i32 %18, i32* %buf_extent62, align 4
-  %buf_stride63 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %g.buffer, i32 0, i32 3, i32 0
-  store i32 1, i32* %buf_stride63, align 4
-  %buf_min64 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %g.buffer, i32 0, i32 4, i32 1
-  store i32 0, i32* %buf_min64, align 4
-  %buf_extent65 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %g.buffer, i32 0, i32 2, i32 1
-  store i32 0, i32* %buf_extent65, align 4
-  %buf_stride66 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %g.buffer, i32 0, i32 3, i32 1
-  store i32 0, i32* %buf_stride66, align 4
-  %buf_min67 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %g.buffer, i32 0, i32 4, i32 2
-  store i32 0, i32* %buf_min67, align 4
-  %buf_extent68 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %g.buffer, i32 0, i32 2, i32 2
-  store i32 0, i32* %buf_extent68, align 4
-  %buf_stride69 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %g.buffer, i32 0, i32 3, i32 2
-  store i32 0, i32* %buf_stride69, align 4
-  %buf_min70 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %g.buffer, i32 0, i32 4, i32 3
-  store i32 0, i32* %buf_min70, align 4
-  %buf_extent71 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %g.buffer, i32 0, i32 2, i32 3
-  store i32 0, i32* %buf_extent71, align 4
-  %buf_stride72 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %g.buffer, i32 0, i32 3, i32 3
-  store i32 0, i32* %buf_stride72, align 4
+  %buf_extent62 = getelementptr inbounds %struct.buffer_t, ptr %g.buffer, i32 0, i32 2, i32 0
+  store i32 %18, ptr %buf_extent62, align 4
+  %buf_stride63 = getelementptr inbounds %struct.buffer_t, ptr %g.buffer, i32 0, i32 3, i32 0
+  store i32 1, ptr %buf_stride63, align 4
+  %buf_min64 = getelementptr inbounds %struct.buffer_t, ptr %g.buffer, i32 0, i32 4, i32 1
+  store i32 0, ptr %buf_min64, align 4
+  %buf_extent65 = getelementptr inbounds %struct.buffer_t, ptr %g.buffer, i32 0, i32 2, i32 1
+  store i32 0, ptr %buf_extent65, align 4
+  %buf_stride66 = getelementptr inbounds %struct.buffer_t, ptr %g.buffer, i32 0, i32 3, i32 1
+  store i32 0, ptr %buf_stride66, align 4
+  %buf_min67 = getelementptr inbounds %struct.buffer_t, ptr %g.buffer, i32 0, i32 4, i32 2
+  store i32 0, ptr %buf_min67, align 4
+  %buf_extent68 = getelementptr inbounds %struct.buffer_t, ptr %g.buffer, i32 0, i32 2, i32 2
+  store i32 0, ptr %buf_extent68, align 4
+  %buf_stride69 = getelementptr inbounds %struct.buffer_t, ptr %g.buffer, i32 0, i32 3, i32 2
+  store i32 0, ptr %buf_stride69, align 4
+  %buf_min70 = getelementptr inbounds %struct.buffer_t, ptr %g.buffer, i32 0, i32 4, i32 3
+  store i32 0, ptr %buf_min70, align 4
+  %buf_extent71 = getelementptr inbounds %struct.buffer_t, ptr %g.buffer, i32 0, i32 2, i32 3
+  store i32 0, ptr %buf_extent71, align 4
+  %buf_stride72 = getelementptr inbounds %struct.buffer_t, ptr %g.buffer, i32 0, i32 3, i32 3
+  store i32 0, ptr %buf_stride72, align 4
   br label %after_bb59
 
 after_bb59:                                       ; preds = %true_bb57, %after_bb
   br i1 %res.host_and_dev_are_null, label %after_bb75.thread, label %after_bb75
 
 after_bb75.thread:                                ; preds = %after_bb59
-  %buf_elem_size76 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %res.buffer, i32 0, i32 5
-  store i32 1, i32* %buf_elem_size76, align 4
-  store i32 %16, i32* %buf_min39, align 4
+  %buf_elem_size76 = getelementptr inbounds %struct.buffer_t, ptr %res.buffer, i32 0, i32 5
+  store i32 1, ptr %buf_elem_size76, align 4
+  store i32 %16, ptr %buf_min39, align 4
   %19 = add nsw i32 %f.extent.0.required.s, 1
-  store i32 %19, i32* %buf_extent31, align 4
-  %buf_stride79 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %res.buffer, i32 0, i32 3, i32 0
-  store i32 1, i32* %buf_stride79, align 4
-  %buf_min80 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %res.buffer, i32 0, i32 4, i32 1
-  store i32 0, i32* %buf_min80, align 4
-  %buf_extent81 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %res.buffer, i32 0, i32 2, i32 1
-  store i32 0, i32* %buf_extent81, align 4
-  %buf_stride82 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %res.buffer, i32 0, i32 3, i32 1
-  store i32 0, i32* %buf_stride82, align 4
-  %buf_min83 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %res.buffer, i32 0, i32 4, i32 2
-  store i32 0, i32* %buf_min83, align 4
-  %buf_extent84 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %res.buffer, i32 0, i32 2, i32 2
-  store i32 0, i32* %buf_extent84, align 4
-  %buf_stride85 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %res.buffer, i32 0, i32 3, i32 2
-  store i32 0, i32* %buf_stride85, align 4
-  %buf_min86 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %res.buffer, i32 0, i32 4, i32 3
-  store i32 0, i32* %buf_min86, align 4
-  %buf_extent87 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %res.buffer, i32 0, i32 2, i32 3
-  store i32 0, i32* %buf_extent87, align 4
-  %buf_stride88 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %res.buffer, i32 0, i32 3, i32 3
-  store i32 0, i32* %buf_stride88, align 4
+  store i32 %19, ptr %buf_extent31, align 4
+  %buf_stride79 = getelementptr inbounds %struct.buffer_t, ptr %res.buffer, i32 0, i32 3, i32 0
+  store i32 1, ptr %buf_stride79, align 4
+  %buf_min80 = getelementptr inbounds %struct.buffer_t, ptr %res.buffer, i32 0, i32 4, i32 1
+  store i32 0, ptr %buf_min80, align 4
+  %buf_extent81 = getelementptr inbounds %struct.buffer_t, ptr %res.buffer, i32 0, i32 2, i32 1
+  store i32 0, ptr %buf_extent81, align 4
+  %buf_stride82 = getelementptr inbounds %struct.buffer_t, ptr %res.buffer, i32 0, i32 3, i32 1
+  store i32 0, ptr %buf_stride82, align 4
+  %buf_min83 = getelementptr inbounds %struct.buffer_t, ptr %res.buffer, i32 0, i32 4, i32 2
+  store i32 0, ptr %buf_min83, align 4
+  %buf_extent84 = getelementptr inbounds %struct.buffer_t, ptr %res.buffer, i32 0, i32 2, i32 2
+  store i32 0, ptr %buf_extent84, align 4
+  %buf_stride85 = getelementptr inbounds %struct.buffer_t, ptr %res.buffer, i32 0, i32 3, i32 2
+  store i32 0, ptr %buf_stride85, align 4
+  %buf_min86 = getelementptr inbounds %struct.buffer_t, ptr %res.buffer, i32 0, i32 4, i32 3
+  store i32 0, ptr %buf_min86, align 4
+  %buf_extent87 = getelementptr inbounds %struct.buffer_t, ptr %res.buffer, i32 0, i32 2, i32 3
+  store i32 0, ptr %buf_extent87, align 4
+  %buf_stride88 = getelementptr inbounds %struct.buffer_t, ptr %res.buffer, i32 0, i32 3, i32 3
+  store i32 0, ptr %buf_stride88, align 4
   br label %destructor_block
 
 after_bb75:                                       ; preds = %after_bb59
@@ -159,22 +159,22 @@ after_bb75:                                       ; preds = %after_bb59
   %23 = shl nsw i32 %res.s0.x.x, 6
   %24 = add nsw i32 %23, %res.min.0
   %25 = sub nsw i32 %24, %f.min.0
-  %26 = getelementptr inbounds i8, i8* %f.host, i32 %25
-  %27 = bitcast i8* %26 to <16 x i32>*
-  %28 = load <16 x i32>, <16 x i32>* %27, align 1, !tbaa !5
+  %26 = getelementptr inbounds i8, ptr %f.host, i32 %25
+  %27 = bitcast ptr %26 to ptr
+  %28 = load <16 x i32>, ptr %27, align 1, !tbaa !5
   %29 = tail call <32 x i32> @llvm.hexagon.V6.vzb(<16 x i32> %28)
   %30 = sub nsw i32 %24, %g.min.0
-  %31 = getelementptr inbounds i8, i8* %g.host, i32 %30
-  %32 = bitcast i8* %31 to <16 x i32>*
-  %33 = load <16 x i32>, <16 x i32>* %32, align 1, !tbaa !8
+  %31 = getelementptr inbounds i8, ptr %g.host, i32 %30
+  %32 = bitcast ptr %31 to ptr
+  %33 = load <16 x i32>, ptr %32, align 1, !tbaa !8
   %34 = tail call <32 x i32> @llvm.hexagon.V6.vzb(<16 x i32> %33)
   %35 = tail call <32 x i32> @llvm.hexagon.V6.vaddh.dv(<32 x i32> %29, <32 x i32> %34)
   %36 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %35)
   %37 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %35)
   %38 = tail call <16 x i32> @llvm.hexagon.V6.vasrhubsat(<16 x i32> %36, <16 x i32> %37, i32 4)
-  %39 = getelementptr inbounds i8, i8* %res.host, i32 %23
-  %40 = bitcast i8* %39 to <16 x i32>*
-  store <16 x i32> %38, <16 x i32>* %40, align 1, !tbaa !10
+  %39 = getelementptr inbounds i8, ptr %res.host, i32 %23
+  %40 = bitcast ptr %39 to ptr
+  store <16 x i32> %38, ptr %40, align 1, !tbaa !10
   %41 = add nuw nsw i32 %res.s0.x.x, 1
   %42 = icmp eq i32 %41, %21
   br i1 %42, label %"end for res.s0.x.x", label %"for res.s0.x.x"
@@ -188,24 +188,24 @@ after_bb75:                                       ; preds = %after_bb59
 "for res.s0.x.x92.preheader":                     ; preds = %"end for res.s0.x.x"
   %46 = sub i32 -64, %f.min.0
   %47 = add i32 %46, %10
-  %48 = getelementptr inbounds i8, i8* %f.host, i32 %47
-  %49 = bitcast i8* %48 to <16 x i32>*
-  %50 = load <16 x i32>, <16 x i32>* %49, align 1
+  %48 = getelementptr inbounds i8, ptr %f.host, i32 %47
+  %49 = bitcast ptr %48 to ptr
+  %50 = load <16 x i32>, ptr %49, align 1
   %51 = tail call <32 x i32> @llvm.hexagon.V6.vzb(<16 x i32> %50)
   %52 = sub i32 -64, %g.min.0
   %53 = add i32 %52, %10
-  %54 = getelementptr inbounds i8, i8* %g.host, i32 %53
-  %55 = bitcast i8* %54 to <16 x i32>*
-  %56 = load <16 x i32>, <16 x i32>* %55, align 1
+  %54 = getelementptr inbounds i8, ptr %g.host, i32 %53
+  %55 = bitcast ptr %54 to ptr
+  %56 = load <16 x i32>, ptr %55, align 1
   %57 = tail call <32 x i32> @llvm.hexagon.V6.vzb(<16 x i32> %56)
   %58 = tail call <32 x i32> @llvm.hexagon.V6.vaddh.dv(<32 x i32> %51, <32 x i32> %57)
   %59 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %58)
   %60 = add nsw i32 %res.extent.0, -64
-  %61 = getelementptr inbounds i8, i8* %res.host, i32 %60
+  %61 = getelementptr inbounds i8, ptr %res.host, i32 %60
   %62 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %58)
   %63 = tail call <16 x i32> @llvm.hexagon.V6.vasrhubsat(<16 x i32> %62, <16 x i32> %59, i32 4)
-  %64 = bitcast i8* %61 to <16 x i32>*
-  store <16 x i32> %63, <16 x i32>* %64, align 1, !tbaa !10
+  %64 = bitcast ptr %61 to ptr
+  store <16 x i32> %63, ptr %64, align 1, !tbaa !10
   br label %destructor_block
 
 destructor_block:                                 ; preds = %"for res.s0.x.x92.preheader", %"end for res.s0.x.x", %after_bb75, %after_bb75.thread

diff  --git a/llvm/test/CodeGen/Hexagon/vdmpy-halide-test.ll b/llvm/test/CodeGen/Hexagon/vdmpy-halide-test.ll
index 352398e7bbeaf..ab06fb09f662c 100644
--- a/llvm/test/CodeGen/Hexagon/vdmpy-halide-test.ll
+++ b/llvm/test/CodeGen/Hexagon/vdmpy-halide-test.ll
@@ -4,87 +4,87 @@
 ; This test checks a compiler assert. So the test just needs to compile for it to pass
 target triple = "hexagon-unknown--elf"
 
-%struct.buffer_t = type { i64, i8*, [4 x i32], [4 x i32], [4 x i32], i32, i8, i8, [6 x i8] }
+%struct.buffer_t = type { i64, ptr, [4 x i32], [4 x i32], [4 x i32], i32, i8, i8, [6 x i8] }
 
 ; Function Attrs: norecurse nounwind
-define i32 @__testOne(%struct.buffer_t* noalias nocapture readonly %inputOne.buffer, %struct.buffer_t* noalias nocapture readonly %inputTwo.buffer, %struct.buffer_t* noalias nocapture readonly %testOne.buffer) #0 {
+define i32 @__testOne(ptr noalias nocapture readonly %inputOne.buffer, ptr noalias nocapture readonly %inputTwo.buffer, ptr noalias nocapture readonly %testOne.buffer) #0 {
 entry:
-  %buf_host = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %inputOne.buffer, i32 0, i32 1
-  %inputOne.host = load i8*, i8** %buf_host, align 4
-  %buf_min = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %inputOne.buffer, i32 0, i32 4, i32 0
-  %inputOne.min.0 = load i32, i32* %buf_min, align 4
-  %buf_host10 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %inputTwo.buffer, i32 0, i32 1
-  %inputTwo.host = load i8*, i8** %buf_host10, align 4
-  %buf_min22 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %inputTwo.buffer, i32 0, i32 4, i32 0
-  %inputTwo.min.0 = load i32, i32* %buf_min22, align 4
-  %buf_host27 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %testOne.buffer, i32 0, i32 1
-  %testOne.host = load i8*, i8** %buf_host27, align 4
-  %buf_extent31 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %testOne.buffer, i32 0, i32 2, i32 0
-  %testOne.extent.0 = load i32, i32* %buf_extent31, align 4
-  %buf_min39 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %testOne.buffer, i32 0, i32 4, i32 0
-  %testOne.min.0 = load i32, i32* %buf_min39, align 4
+  %buf_host = getelementptr inbounds %struct.buffer_t, ptr %inputOne.buffer, i32 0, i32 1
+  %inputOne.host = load ptr, ptr %buf_host, align 4
+  %buf_min = getelementptr inbounds %struct.buffer_t, ptr %inputOne.buffer, i32 0, i32 4, i32 0
+  %inputOne.min.0 = load i32, ptr %buf_min, align 4
+  %buf_host10 = getelementptr inbounds %struct.buffer_t, ptr %inputTwo.buffer, i32 0, i32 1
+  %inputTwo.host = load ptr, ptr %buf_host10, align 4
+  %buf_min22 = getelementptr inbounds %struct.buffer_t, ptr %inputTwo.buffer, i32 0, i32 4, i32 0
+  %inputTwo.min.0 = load i32, ptr %buf_min22, align 4
+  %buf_host27 = getelementptr inbounds %struct.buffer_t, ptr %testOne.buffer, i32 0, i32 1
+  %testOne.host = load ptr, ptr %buf_host27, align 4
+  %buf_extent31 = getelementptr inbounds %struct.buffer_t, ptr %testOne.buffer, i32 0, i32 2, i32 0
+  %testOne.extent.0 = load i32, ptr %buf_extent31, align 4
+  %buf_min39 = getelementptr inbounds %struct.buffer_t, ptr %testOne.buffer, i32 0, i32 4, i32 0
+  %testOne.min.0 = load i32, ptr %buf_min39, align 4
   %0 = ashr i32 %testOne.extent.0, 4
   %1 = icmp sgt i32 %0, 0
   br i1 %1, label %"for testOne.s0.x.x.preheader", label %"end for testOne.s0.x.x"
 
 "for testOne.s0.x.x.preheader":                   ; preds = %entry
-  %2 = bitcast i8* %inputOne.host to i16*
-  %3 = bitcast i8* %inputTwo.host to i16*
-  %4 = bitcast i8* %testOne.host to i32*
+  %2 = bitcast ptr %inputOne.host to ptr
+  %3 = bitcast ptr %inputTwo.host to ptr
+  %4 = bitcast ptr %testOne.host to ptr
   br label %"for testOne.s0.x.x"
 
 "for testOne.s0.x.x":                             ; preds = %"for testOne.s0.x.x", %"for testOne.s0.x.x.preheader"
-  %.phi = phi i32* [ %4, %"for testOne.s0.x.x.preheader" ], [ %.inc, %"for testOne.s0.x.x" ]
+  %.phi = phi ptr [ %4, %"for testOne.s0.x.x.preheader" ], [ %.inc, %"for testOne.s0.x.x" ]
   %testOne.s0.x.x = phi i32 [ 0, %"for testOne.s0.x.x.preheader" ], [ %50, %"for testOne.s0.x.x" ]
   %5 = shl nsw i32 %testOne.s0.x.x, 4
   %6 = add nsw i32 %5, %testOne.min.0
   %7 = shl nsw i32 %6, 1
   %8 = sub nsw i32 %7, %inputOne.min.0
-  %9 = getelementptr inbounds i16, i16* %2, i32 %8
-  %10 = bitcast i16* %9 to <16 x i16>*
-  %11 = load <16 x i16>, <16 x i16>* %10, align 2, !tbaa !5
+  %9 = getelementptr inbounds i16, ptr %2, i32 %8
+  %10 = bitcast ptr %9 to ptr
+  %11 = load <16 x i16>, ptr %10, align 2, !tbaa !5
   %12 = add nsw i32 %8, 15
-  %13 = getelementptr inbounds i16, i16* %2, i32 %12
-  %14 = bitcast i16* %13 to <16 x i16>*
-  %15 = load <16 x i16>, <16 x i16>* %14, align 2, !tbaa !5
+  %13 = getelementptr inbounds i16, ptr %2, i32 %12
+  %14 = bitcast ptr %13 to ptr
+  %15 = load <16 x i16>, ptr %14, align 2, !tbaa !5
   %16 = shufflevector <16 x i16> %11, <16 x i16> %15, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
   %17 = add nsw i32 %8, 1
-  %18 = getelementptr inbounds i16, i16* %2, i32 %17
-  %19 = bitcast i16* %18 to <16 x i16>*
-  %20 = load <16 x i16>, <16 x i16>* %19, align 2, !tbaa !5
+  %18 = getelementptr inbounds i16, ptr %2, i32 %17
+  %19 = bitcast ptr %18 to ptr
+  %20 = load <16 x i16>, ptr %19, align 2, !tbaa !5
   %21 = add nsw i32 %8, 16
-  %22 = getelementptr inbounds i16, i16* %2, i32 %21
-  %23 = bitcast i16* %22 to <16 x i16>*
-  %24 = load <16 x i16>, <16 x i16>* %23, align 2, !tbaa !5
+  %22 = getelementptr inbounds i16, ptr %2, i32 %21
+  %23 = bitcast ptr %22 to ptr
+  %24 = load <16 x i16>, ptr %23, align 2, !tbaa !5
   %25 = shufflevector <16 x i16> %20, <16 x i16> %24, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
   %26 = shufflevector <16 x i16> %16, <16 x i16> %25, <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
   %27 = sub nsw i32 %7, %inputTwo.min.0
-  %28 = getelementptr inbounds i16, i16* %3, i32 %27
-  %29 = bitcast i16* %28 to <16 x i16>*
-  %30 = load <16 x i16>, <16 x i16>* %29, align 2, !tbaa !8
+  %28 = getelementptr inbounds i16, ptr %3, i32 %27
+  %29 = bitcast ptr %28 to ptr
+  %30 = load <16 x i16>, ptr %29, align 2, !tbaa !8
   %31 = add nsw i32 %27, 15
-  %32 = getelementptr inbounds i16, i16* %3, i32 %31
-  %33 = bitcast i16* %32 to <16 x i16>*
-  %34 = load <16 x i16>, <16 x i16>* %33, align 2, !tbaa !8
+  %32 = getelementptr inbounds i16, ptr %3, i32 %31
+  %33 = bitcast ptr %32 to ptr
+  %34 = load <16 x i16>, ptr %33, align 2, !tbaa !8
   %35 = shufflevector <16 x i16> %30, <16 x i16> %34, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
   %36 = add nsw i32 %27, 1
-  %37 = getelementptr inbounds i16, i16* %3, i32 %36
-  %38 = bitcast i16* %37 to <16 x i16>*
-  %39 = load <16 x i16>, <16 x i16>* %38, align 2, !tbaa !8
+  %37 = getelementptr inbounds i16, ptr %3, i32 %36
+  %38 = bitcast ptr %37 to ptr
+  %39 = load <16 x i16>, ptr %38, align 2, !tbaa !8
   %40 = add nsw i32 %27, 16
-  %41 = getelementptr inbounds i16, i16* %3, i32 %40
-  %42 = bitcast i16* %41 to <16 x i16>*
-  %43 = load <16 x i16>, <16 x i16>* %42, align 2, !tbaa !8
+  %41 = getelementptr inbounds i16, ptr %3, i32 %40
+  %42 = bitcast ptr %41 to ptr
+  %43 = load <16 x i16>, ptr %42, align 2, !tbaa !8
   %44 = shufflevector <16 x i16> %39, <16 x i16> %43, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
   %45 = shufflevector <16 x i16> %35, <16 x i16> %44, <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
   %46 = bitcast <32 x i16> %26 to <16 x i32>
   %47 = bitcast <32 x i16> %45 to <16 x i32>
   %48 = tail call <16 x i32> @llvm.hexagon.V6.vdmpyhvsat(<16 x i32> %46, <16 x i32> %47)
-  %49 = bitcast i32* %.phi to <16 x i32>*
-  store <16 x i32> %48, <16 x i32>* %49, align 4, !tbaa !10
+  %49 = bitcast ptr %.phi to ptr
+  store <16 x i32> %48, ptr %49, align 4, !tbaa !10
   %50 = add nuw nsw i32 %testOne.s0.x.x, 1
   %51 = icmp eq i32 %50, %0
-  %.inc = getelementptr i32, i32* %.phi, i32 16
+  %.inc = getelementptr i32, ptr %.phi, i32 16
   br i1 %51, label %"end for testOne.s0.x.x", label %"for testOne.s0.x.x"
 
 "end for testOne.s0.x.x":                         ; preds = %"for testOne.s0.x.x", %entry
@@ -98,54 +98,54 @@ entry:
   %56 = shl nsw i32 %55, 1
   %57 = sub nsw i32 %56, %inputOne.min.0
   %58 = add nsw i32 %57, -32
-  %59 = bitcast i8* %inputOne.host to i16*
-  %60 = getelementptr inbounds i16, i16* %59, i32 %58
-  %61 = bitcast i16* %60 to <16 x i16>*
-  %62 = load <16 x i16>, <16 x i16>* %61, align 2
+  %59 = bitcast ptr %inputOne.host to ptr
+  %60 = getelementptr inbounds i16, ptr %59, i32 %58
+  %61 = bitcast ptr %60 to ptr
+  %62 = load <16 x i16>, ptr %61, align 2
   %63 = add nsw i32 %57, -17
-  %64 = getelementptr inbounds i16, i16* %59, i32 %63
-  %65 = bitcast i16* %64 to <16 x i16>*
-  %66 = load <16 x i16>, <16 x i16>* %65, align 2
+  %64 = getelementptr inbounds i16, ptr %59, i32 %63
+  %65 = bitcast ptr %64 to ptr
+  %66 = load <16 x i16>, ptr %65, align 2
   %67 = add nsw i32 %57, -31
-  %68 = getelementptr inbounds i16, i16* %59, i32 %67
-  %69 = bitcast i16* %68 to <16 x i16>*
-  %70 = load <16 x i16>, <16 x i16>* %69, align 2
+  %68 = getelementptr inbounds i16, ptr %59, i32 %67
+  %69 = bitcast ptr %68 to ptr
+  %70 = load <16 x i16>, ptr %69, align 2
   %71 = add nsw i32 %57, -16
-  %72 = getelementptr inbounds i16, i16* %59, i32 %71
-  %73 = bitcast i16* %72 to <16 x i16>*
-  %74 = load <16 x i16>, <16 x i16>* %73, align 2
+  %72 = getelementptr inbounds i16, ptr %59, i32 %71
+  %73 = bitcast ptr %72 to ptr
+  %74 = load <16 x i16>, ptr %73, align 2
   %75 = shufflevector <16 x i16> %70, <16 x i16> %74, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
   %76 = sub nsw i32 %56, %inputTwo.min.0
   %77 = add nsw i32 %76, -32
-  %78 = bitcast i8* %inputTwo.host to i16*
-  %79 = getelementptr inbounds i16, i16* %78, i32 %77
-  %80 = bitcast i16* %79 to <16 x i16>*
-  %81 = load <16 x i16>, <16 x i16>* %80, align 2
+  %78 = bitcast ptr %inputTwo.host to ptr
+  %79 = getelementptr inbounds i16, ptr %78, i32 %77
+  %80 = bitcast ptr %79 to ptr
+  %81 = load <16 x i16>, ptr %80, align 2
   %82 = add nsw i32 %76, -17
-  %83 = getelementptr inbounds i16, i16* %78, i32 %82
-  %84 = bitcast i16* %83 to <16 x i16>*
-  %85 = load <16 x i16>, <16 x i16>* %84, align 2
+  %83 = getelementptr inbounds i16, ptr %78, i32 %82
+  %84 = bitcast ptr %83 to ptr
+  %85 = load <16 x i16>, ptr %84, align 2
   %86 = shufflevector <16 x i16> %81, <16 x i16> %85, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
   %87 = add nsw i32 %76, -31
-  %88 = getelementptr inbounds i16, i16* %78, i32 %87
-  %89 = bitcast i16* %88 to <16 x i16>*
-  %90 = load <16 x i16>, <16 x i16>* %89, align 2
+  %88 = getelementptr inbounds i16, ptr %78, i32 %87
+  %89 = bitcast ptr %88 to ptr
+  %90 = load <16 x i16>, ptr %89, align 2
   %91 = add nsw i32 %76, -16
-  %92 = getelementptr inbounds i16, i16* %78, i32 %91
-  %93 = bitcast i16* %92 to <16 x i16>*
-  %94 = load <16 x i16>, <16 x i16>* %93, align 2
+  %92 = getelementptr inbounds i16, ptr %78, i32 %91
+  %93 = bitcast ptr %92 to ptr
+  %94 = load <16 x i16>, ptr %93, align 2
   %95 = shufflevector <16 x i16> %90, <16 x i16> %94, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
   %96 = shufflevector <16 x i16> %86, <16 x i16> %95, <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
   %97 = bitcast <32 x i16> %96 to <16 x i32>
   %98 = add nsw i32 %testOne.extent.0, -16
-  %99 = bitcast i8* %testOne.host to i32*
-  %100 = getelementptr inbounds i32, i32* %99, i32 %98
+  %99 = bitcast ptr %testOne.host to ptr
+  %100 = getelementptr inbounds i32, ptr %99, i32 %98
   %101 = shufflevector <16 x i16> %62, <16 x i16> %66, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
   %102 = shufflevector <16 x i16> %101, <16 x i16> %75, <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
   %103 = bitcast <32 x i16> %102 to <16 x i32>
   %104 = tail call <16 x i32> @llvm.hexagon.V6.vdmpyhvsat(<16 x i32> %103, <16 x i32> %97)
-  %105 = bitcast i32* %100 to <16 x i32>*
-  store <16 x i32> %104, <16 x i32>* %105, align 4, !tbaa !10
+  %105 = bitcast ptr %100 to ptr
+  store <16 x i32> %104, ptr %105, align 4, !tbaa !10
   br label %destructor_block
 
 destructor_block:                                 ; preds = %"for testOne.s0.x.x44.preheader", %"end for testOne.s0.x.x"

diff  --git a/llvm/test/CodeGen/Hexagon/vect-regpairs.ll b/llvm/test/CodeGen/Hexagon/vect-regpairs.ll
index f21290c0fbcb0..039b9d297c548 100644
--- a/llvm/test/CodeGen/Hexagon/vect-regpairs.ll
+++ b/llvm/test/CodeGen/Hexagon/vect-regpairs.ll
@@ -24,28 +24,28 @@ declare <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32>, <16 x i32>, i32)
 declare <16 x i32> @llvm.hexagon.V6.vshuffob(<16 x i32>, <16 x i32>)
 
 
-define void @Gaussian7x7u8PerRow(i8* %src, i32 %stride, i32 %width, i8* %dst) #0 {
+define void @Gaussian7x7u8PerRow(ptr %src, i32 %stride, i32 %width, ptr %dst) #0 {
 entry:
   %mul = mul i32 %stride, 3
   %idx.neg = sub i32 0, %mul
-  %add.ptr = getelementptr i8, i8* %src, i32 %idx.neg
-  bitcast i8* %add.ptr to <16 x i32>*
+  %add.ptr = getelementptr i8, ptr %src, i32 %idx.neg
+  bitcast ptr %add.ptr to ptr
   %mul1 = shl i32 %stride, 1
   %idx.neg2 = sub i32 0, %mul1
-  %add.ptr3 = getelementptr i8, i8* %src, i32 %idx.neg2
-  bitcast i8* %add.ptr3 to <16 x i32>*
+  %add.ptr3 = getelementptr i8, ptr %src, i32 %idx.neg2
+  bitcast ptr %add.ptr3 to ptr
   %idx.neg5 = sub i32 0, %stride
-  %add.ptr6 = getelementptr i8, i8* %src, i32 %idx.neg5
-  bitcast i8* %add.ptr6 to <16 x i32>*
-  bitcast i8* %src to <16 x i32>*
-  %add.ptr10 = getelementptr i8, i8* %src, i32 %stride
-  bitcast i8* %add.ptr10 to <16 x i32>*
-  %add.ptr12 = getelementptr i8, i8* %src, i32 %mul1
-  bitcast i8* %add.ptr12 to <16 x i32>*
-  %add.ptr14 = getelementptr i8, i8* %src, i32 %mul
-  bitcast i8* %add.ptr14 to <16 x i32>*
-  bitcast i8* %dst to <16 x i32>*
-  load <16 x i32>, <16 x i32>* %0load <16 x i32>, <16 x i32>* %1load <16 x i32>, <16 x i32>* %2load <16 x i32>, <16 x i32>* %3load <16 x i32>, <16 x i32>* %4load <16 x i32>, <16 x i32>* %5load <16 x i32>, <16 x i32>* %6call <16 x i32> @llvm.hexagon.V6.vd0()
+  %add.ptr6 = getelementptr i8, ptr %src, i32 %idx.neg5
+  bitcast ptr %add.ptr6 to ptr
+  bitcast ptr %src to ptr
+  %add.ptr10 = getelementptr i8, ptr %src, i32 %stride
+  bitcast ptr %add.ptr10 to ptr
+  %add.ptr12 = getelementptr i8, ptr %src, i32 %mul1
+  bitcast ptr %add.ptr12 to ptr
+  %add.ptr14 = getelementptr i8, ptr %src, i32 %mul
+  bitcast ptr %add.ptr14 to ptr
+  bitcast ptr %dst to ptr
+  load <16 x i32>, ptr %0load <16 x i32>, ptr %1load <16 x i32>, ptr %2load <16 x i32>, ptr %3load <16 x i32>, ptr %4load <16 x i32>, ptr %5load <16 x i32>, ptr %6call <16 x i32> @llvm.hexagon.V6.vd0()
   call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %15, <16 x i32> %15)
   call <32 x i32> @llvm.hexagon.V6.vaddubh(<16 x i32> %14, <16 x i32> %8)
   call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %13, <16 x i32> %9)
@@ -55,26 +55,26 @@ entry:
   call <32 x i32> @llvm.hexagon.V6.vmpybus.acc(<32 x i32> %21, <16 x i32> %11, i32 336860180)
   %cmp155 = icmp sgt i32 %width, 64
   br i1 %cmp155, label %for.body.preheader, label %for.end
-for.body.preheader:                               %incdec.ptr20 = getelementptr i8, i8* %add.ptr14%23 = bitcast i8* %incdec.ptr20 to <16 x i32>*
-  %incdec.ptr19 = getelementptr i8, i8* %add.ptr12%24 = bitcast i8* %incdec.ptr19 to <16 x i32>*
-  %incdec.ptr18 = getelementptr i8, i8* %add.ptr10%25 = bitcast i8* %incdec.ptr18 to <16 x i32>*
-  %incdec.ptr17 = getelementptr i8, i8* %src%26 = bitcast i8* %incdec.ptr17 to <16 x i32>*
-  %incdec.ptr16 = getelementptr i8, i8* %add.ptr6%27 = bitcast i8* %incdec.ptr16 to <16 x i32>*
-  %incdec.ptr15 = getelementptr i8, i8* %add.ptr3%28 = bitcast i8* %incdec.ptr15 to <16 x i32>*
-  %incdec.ptr = getelementptr i8, i8* %add.ptr%29 = bitcast i8* %incdec.ptr to <16 x i32>*
+for.body.preheader:                               %incdec.ptr20 = getelementptr i8, ptr %add.ptr14%23 = bitcast ptr %incdec.ptr20 to ptr
+  %incdec.ptr19 = getelementptr i8, ptr %add.ptr12%24 = bitcast ptr %incdec.ptr19 to ptr
+  %incdec.ptr18 = getelementptr i8, ptr %add.ptr10%25 = bitcast ptr %incdec.ptr18 to ptr
+  %incdec.ptr17 = getelementptr i8, ptr %src%26 = bitcast ptr %incdec.ptr17 to ptr
+  %incdec.ptr16 = getelementptr i8, ptr %add.ptr6%27 = bitcast ptr %incdec.ptr16 to ptr
+  %incdec.ptr15 = getelementptr i8, ptr %add.ptr3%28 = bitcast ptr %incdec.ptr15 to ptr
+  %incdec.ptr = getelementptr i8, ptr %add.ptr%29 = bitcast ptr %incdec.ptr to ptr
   br label %for.body
-for.body:                                         %optr.0166 = phi <16 x i32>* [ %incdec.ptr28, %for.body ], [ %7, %for.body.preheader ]
-  %iptr6.0165 = phi <16 x i32>* [ %incdec.ptr27, %for.body ], [ %23, %for.body.preheader ]
-  %iptr5.0164 = phi <16 x i32>* [ %incdec.ptr26, %for.body ], [ %24, %for.body.preheader ]
-  %iptr4.0163 = phi <16 x i32>* [ %incdec.ptr25, %for.body ], [ %25, %for.body.preheader ]
-  %iptr3.0162 = phi <16 x i32>* [ %incdec.ptr24, %for.body ], [ %26, %for.body.preheader ]
-  %iptr2.0161 = phi <16 x i32>* [ %incdec.ptr23, %for.body ], [ %27, %for.body.preheader ]
-  %iptr1.0160 = phi <16 x i32>* [ %incdec.ptr22, %for.body ], [ %28, %for.body.preheader ]
-  %iptr0.0159 = phi <16 x i32>* [ %incdec.ptr21, %for.body ], [ %29, %for.body.preheader ]
+for.body:                                         %optr.0166 = phi ptr [ %incdec.ptr28, %for.body ], [ %7, %for.body.preheader ]
+  %iptr6.0165 = phi ptr [ %incdec.ptr27, %for.body ], [ %23, %for.body.preheader ]
+  %iptr5.0164 = phi ptr [ %incdec.ptr26, %for.body ], [ %24, %for.body.preheader ]
+  %iptr4.0163 = phi ptr [ %incdec.ptr25, %for.body ], [ %25, %for.body.preheader ]
+  %iptr3.0162 = phi ptr [ %incdec.ptr24, %for.body ], [ %26, %for.body.preheader ]
+  %iptr2.0161 = phi ptr [ %incdec.ptr23, %for.body ], [ %27, %for.body.preheader ]
+  %iptr1.0160 = phi ptr [ %incdec.ptr22, %for.body ], [ %28, %for.body.preheader ]
+  %iptr0.0159 = phi ptr [ %incdec.ptr21, %for.body ], [ %29, %for.body.preheader ]
   %dXV1.0158 = phi <32 x i32> [ %49, %for.body ], [ %22, %for.body.preheader ]
   %dXV0.0157 = phi <32 x i32> [ %dXV1.0158, %for.body ], [ %16, %for.body.preheader ]
   %i.0156 = phi i32 [ %sub, %for.body ], [ %width, %for.body.preheader ]
-  %incdec.ptr21 = getelementptr <16 x i32>, <16 x i32>* %iptr0.0159%30 = load <16 x i32>, <16 x i32>* %iptr0.0159%incdec.ptr22 = getelementptr <16 x i32>, <16 x i32>* %iptr1.0160%31 = load <16 x i32>, <16 x i32>* %iptr1.0160%incdec.ptr23 = getelementptr <16 x i32>, <16 x i32>* %iptr2.0161%32 = load <16 x i32>, <16 x i32>* %iptr2.0161%incdec.ptr24 = getelementptr <16 x i32>, <16 x i32>* %iptr3.0162%33 = load <16 x i32>, <16 x i32>* %iptr3.0162%incdec.ptr25 = getelementptr <16 x i32>, <16 x i32>* %iptr4.0163%34 = load <16 x i32>, <16 x i32>* %iptr4.0163%incdec.ptr26 = getelementptr <16 x i32>, <16 x i32>* %iptr5.0164%35 = load <16 x i32>, <16 x i32>* %iptr5.0164%incdec.ptr27 = getelementptr <16 x i32>, <16 x i32>* %iptr6.0165%36 = load <16 x i32>, <16 x i32>* %iptr6.0165, !tbaa !8
+  %incdec.ptr21 = getelementptr <16 x i32>, ptr %iptr0.0159%30 = load <16 x i32>, ptr %iptr0.0159%incdec.ptr22 = getelementptr <16 x i32>, ptr %iptr1.0160%31 = load <16 x i32>, ptr %iptr1.0160%incdec.ptr23 = getelementptr <16 x i32>, ptr %iptr2.0161%32 = load <16 x i32>, ptr %iptr2.0161%incdec.ptr24 = getelementptr <16 x i32>, ptr %iptr3.0162%33 = load <16 x i32>, ptr %iptr3.0162%incdec.ptr25 = getelementptr <16 x i32>, ptr %iptr4.0163%34 = load <16 x i32>, ptr %iptr4.0163%incdec.ptr26 = getelementptr <16 x i32>, ptr %iptr5.0164%35 = load <16 x i32>, ptr %iptr5.0164%incdec.ptr27 = getelementptr <16 x i32>, ptr %iptr6.0165%36 = load <16 x i32>, ptr %iptr6.0165, !tbaa !8
   call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %dXV1.0158)
   call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %dXV0.0157)
   call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %37, <16 x i32> %38, i32 2)
@@ -114,8 +114,8 @@ for.body:                                         %optr.0166 = phi <16 x i32>* [
   call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %68)
   call <16 x i32> @llvm.hexagon.V6.vasrwh(<16 x i32> %72, <16 x i32> %73, i32 12)
   call <16 x i32> @llvm.hexagon.V6.vshuffeb(<16 x i32> %74, <16 x i32> %71)
-  %incdec.ptr28 = getelementptr <16 x i32>, <16 x i32>* %1
-  store <16 x i32> %75, <16 x i32>* %optr.0166%sub = add i32 %i.0156, -64
+  %incdec.ptr28 = getelementptr <16 x i32>, ptr %1
+  store <16 x i32> %75, ptr %optr.0166%sub = add i32 %i.0156, -64
   %cmp = icmp sgt i32 %sub, 64
   br i1 %cmp, label %for.body, label %for.end
 for.end:                                          ret void

diff  --git a/llvm/test/CodeGen/Hexagon/vect_setcc_v2i16.ll b/llvm/test/CodeGen/Hexagon/vect_setcc_v2i16.ll
index a8625e65edfbe..61171b22f3420 100644
--- a/llvm/test/CodeGen/Hexagon/vect_setcc_v2i16.ll
+++ b/llvm/test/CodeGen/Hexagon/vect_setcc_v2i16.ll
@@ -7,44 +7,44 @@ target triple = "hexagon"
 @g0 = internal unnamed_addr global [24 x i16] zeroinitializer, align 8
 
 ; Function Attrs: nounwind
-define void @f0(i16* nocapture %a0) #0 {
+define void @f0(ptr nocapture %a0) #0 {
 b0:
   %v0 = alloca [128 x i16], align 8
   %v1 = alloca [16 x i16], align 8
-  %v2 = bitcast [128 x i16]* %v0 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 256, i8* %v2) #2
-  %v3 = getelementptr [128 x i16], [128 x i16]* %v0, i32 0, i32 80
+  %v2 = bitcast ptr %v0 to ptr
+  call void @llvm.lifetime.start.p0(i64 256, ptr %v2) #2
+  %v3 = getelementptr [128 x i16], ptr %v0, i32 0, i32 80
   br label %b8
 
 b1:                                               ; preds = %b3
   br label %b2
 
 b2:                                               ; preds = %b4, %b1
-  call void @llvm.lifetime.end.p0i8(i64 256, i8* %v2) #2
+  call void @llvm.lifetime.end.p0(i64 256, ptr %v2) #2
   ret void
 
 b3:                                               ; preds = %b5, %b3
-  %v4 = phi i16* [ %v26, %b5 ], [ %v9, %b3 ]
+  %v4 = phi ptr [ %v26, %b5 ], [ %v9, %b3 ]
   %v5 = phi i32 [ 0, %b5 ], [ %v7, %b3 ]
-  %v6 = bitcast i16* %v4 to <4 x i16>*
-  store <4 x i16> <i16 1, i16 1, i16 1, i16 1>, <4 x i16>* %v6, align 8
+  %v6 = bitcast ptr %v4 to ptr
+  store <4 x i16> <i16 1, i16 1, i16 1, i16 1>, ptr %v6, align 8
   %v7 = add nsw i32 %v5, 4
   %v8 = icmp slt i32 %v5, 12
-  %v9 = getelementptr i16, i16* %v4, i32 4
+  %v9 = getelementptr i16, ptr %v4, i32 4
   br i1 %v8, label %b3, label %b1
 
 b4:                                               ; preds = %b6
-  %v10 = getelementptr [16 x i16], [16 x i16]* %v1, i32 0, i32 13
-  %v11 = bitcast i16* %v10 to <2 x i16>*
-  %v12 = load <2 x i16>, <2 x i16>* %v11, align 2
+  %v10 = getelementptr [16 x i16], ptr %v1, i32 0, i32 13
+  %v11 = bitcast ptr %v10 to ptr
+  %v12 = load <2 x i16>, ptr %v11, align 2
   %v13 = icmp sgt <2 x i16> %v12, <i16 11, i16 11>
   %v14 = zext <2 x i1> %v13 to <2 x i32>
   %v15 = add <2 x i32> %v39, %v14
   %v16 = add <2 x i32> %v15, %v40
   %v17 = extractelement <2 x i32> %v16, i32 0
   %v18 = extractelement <2 x i32> %v16, i32 1
-  %v19 = getelementptr [16 x i16], [16 x i16]* %v1, i32 0, i32 15
-  %v20 = load i16, i16* %v19, align 2
+  %v19 = getelementptr [16 x i16], ptr %v1, i32 0, i32 15
+  %v20 = load i16, ptr %v19, align 2
   %v21 = icmp sgt i16 %v20, 11
   %v22 = zext i1 %v21 to i32
   %v23 = add i32 %v18, %v22
@@ -53,16 +53,16 @@ b4:                                               ; preds = %b6
   br i1 %v25, label %b5, label %b2
 
 b5:                                               ; preds = %b4
-  %v26 = getelementptr [16 x i16], [16 x i16]* %v1, i32 0, i32 0
+  %v26 = getelementptr [16 x i16], ptr %v1, i32 0, i32 0
   br label %b3
 
 b6:                                               ; preds = %b7, %b6
   %v27 = phi <2 x i32> [ zeroinitializer, %b7 ], [ %v40, %b6 ]
   %v28 = phi <2 x i32> [ zeroinitializer, %b7 ], [ %v39, %b6 ]
-  %v29 = phi i16* [ %v44, %b7 ], [ %v43, %b6 ]
+  %v29 = phi ptr [ %v44, %b7 ], [ %v43, %b6 ]
   %v30 = phi i32 [ 0, %b7 ], [ %v41, %b6 ]
-  %v31 = bitcast i16* %v29 to <4 x i16>*
-  %v32 = load <4 x i16>, <4 x i16>* %v31, align 2
+  %v31 = bitcast ptr %v29 to ptr
+  %v32 = load <4 x i16>, ptr %v31, align 2
   %v33 = icmp sgt <4 x i16> %v32, <i16 11, i16 11, i16 11, i16 11>
   %v34 = zext <4 x i1> %v33 to <4 x i16>
   %v35 = shufflevector <4 x i16> %v34, <4 x i16> undef, <2 x i32> <i32 2, i32 3>
@@ -73,33 +73,33 @@ b6:                                               ; preds = %b7, %b6
   %v40 = add <2 x i32> %v27, %v38
   %v41 = add nsw i32 %v30, 4
   %v42 = icmp slt i32 %v30, 4
-  %v43 = getelementptr i16, i16* %v29, i32 4
+  %v43 = getelementptr i16, ptr %v29, i32 4
   br i1 %v42, label %b6, label %b4
 
 b7:                                               ; preds = %b8
-  %v44 = getelementptr [16 x i16], [16 x i16]* %v1, i32 0, i32 5
+  %v44 = getelementptr [16 x i16], ptr %v1, i32 0, i32 5
   br label %b6
 
 b8:                                               ; preds = %b8, %b0
-  %v45 = phi i16* [ %v3, %b0 ], [ %v53, %b8 ]
-  %v46 = phi i16* [ getelementptr inbounds ([24 x i16], [24 x i16]* @g0, i32 0, i32 0), %b0 ], [ %v54, %b8 ]
+  %v45 = phi ptr [ %v3, %b0 ], [ %v53, %b8 ]
+  %v46 = phi ptr [ @g0, %b0 ], [ %v54, %b8 ]
   %v47 = phi i32 [ 0, %b0 ], [ %v51, %b8 ]
-  %v48 = bitcast i16* %v45 to <4 x i16>*
-  %v49 = load <4 x i16>, <4 x i16>* %v48, align 8
-  %v50 = bitcast i16* %v46 to <4 x i16>*
-  store <4 x i16> %v49, <4 x i16>* %v50, align 8
+  %v48 = bitcast ptr %v45 to ptr
+  %v49 = load <4 x i16>, ptr %v48, align 8
+  %v50 = bitcast ptr %v46 to ptr
+  store <4 x i16> %v49, ptr %v50, align 8
   %v51 = add nsw i32 %v47, 4
   %v52 = icmp slt i32 %v47, 20
-  %v53 = getelementptr i16, i16* %v45, i32 4
-  %v54 = getelementptr i16, i16* %v46, i32 4
+  %v53 = getelementptr i16, ptr %v45, i32 4
+  %v54 = getelementptr i16, ptr %v46, i32 4
   br i1 %v52, label %b8, label %b7
 }
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
 
 attributes #0 = { nounwind "target-cpu"="hexagonv55" }
 attributes #1 = { argmemonly nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/vmpa-halide-test.ll b/llvm/test/CodeGen/Hexagon/vmpa-halide-test.ll
index 8b207ba4f2389..a71200296d1ce 100644
--- a/llvm/test/CodeGen/Hexagon/vmpa-halide-test.ll
+++ b/llvm/test/CodeGen/Hexagon/vmpa-halide-test.ll
@@ -4,58 +4,58 @@
 
 target triple = "hexagon-unknown--elf"
 
-%struct.buffer_t = type { i64, i8*, [4 x i32], [4 x i32], [4 x i32], i32, i8, i8, [6 x i8] }
+%struct.buffer_t = type { i64, ptr, [4 x i32], [4 x i32], [4 x i32], i32, i8, i8, [6 x i8] }
 
 ; Function Attrs: norecurse nounwind
-define i32 @__testOne(%struct.buffer_t* noalias nocapture readonly %inputOne.buffer, %struct.buffer_t* noalias nocapture readonly %inputTwo.buffer, %struct.buffer_t* noalias nocapture readonly %testOne.buffer) #0 {
+define i32 @__testOne(ptr noalias nocapture readonly %inputOne.buffer, ptr noalias nocapture readonly %inputTwo.buffer, ptr noalias nocapture readonly %testOne.buffer) #0 {
 entry:
-  %buf_host = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %inputOne.buffer, i32 0, i32 1
-  %inputOne.host = load i8*, i8** %buf_host, align 4
-  %buf_min = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %inputOne.buffer, i32 0, i32 4, i32 0
-  %inputOne.min.0 = load i32, i32* %buf_min, align 4
-  %buf_host10 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %inputTwo.buffer, i32 0, i32 1
-  %inputTwo.host = load i8*, i8** %buf_host10, align 4
-  %buf_min22 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %inputTwo.buffer, i32 0, i32 4, i32 0
-  %inputTwo.min.0 = load i32, i32* %buf_min22, align 4
-  %buf_host27 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %testOne.buffer, i32 0, i32 1
-  %testOne.host = load i8*, i8** %buf_host27, align 4
-  %buf_extent31 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %testOne.buffer, i32 0, i32 2, i32 0
-  %testOne.extent.0 = load i32, i32* %buf_extent31, align 4
-  %buf_min39 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %testOne.buffer, i32 0, i32 4, i32 0
-  %testOne.min.0 = load i32, i32* %buf_min39, align 4
+  %buf_host = getelementptr inbounds %struct.buffer_t, ptr %inputOne.buffer, i32 0, i32 1
+  %inputOne.host = load ptr, ptr %buf_host, align 4
+  %buf_min = getelementptr inbounds %struct.buffer_t, ptr %inputOne.buffer, i32 0, i32 4, i32 0
+  %inputOne.min.0 = load i32, ptr %buf_min, align 4
+  %buf_host10 = getelementptr inbounds %struct.buffer_t, ptr %inputTwo.buffer, i32 0, i32 1
+  %inputTwo.host = load ptr, ptr %buf_host10, align 4
+  %buf_min22 = getelementptr inbounds %struct.buffer_t, ptr %inputTwo.buffer, i32 0, i32 4, i32 0
+  %inputTwo.min.0 = load i32, ptr %buf_min22, align 4
+  %buf_host27 = getelementptr inbounds %struct.buffer_t, ptr %testOne.buffer, i32 0, i32 1
+  %testOne.host = load ptr, ptr %buf_host27, align 4
+  %buf_extent31 = getelementptr inbounds %struct.buffer_t, ptr %testOne.buffer, i32 0, i32 2, i32 0
+  %testOne.extent.0 = load i32, ptr %buf_extent31, align 4
+  %buf_min39 = getelementptr inbounds %struct.buffer_t, ptr %testOne.buffer, i32 0, i32 4, i32 0
+  %testOne.min.0 = load i32, ptr %buf_min39, align 4
   %0 = ashr i32 %testOne.extent.0, 6
   %1 = icmp sgt i32 %0, 0
   br i1 %1, label %"for testOne.s0.x.x.preheader", label %"end for testOne.s0.x.x"
 
 "for testOne.s0.x.x.preheader":                   ; preds = %entry
-  %2 = bitcast i8* %testOne.host to i16*
+  %2 = bitcast ptr %testOne.host to ptr
   br label %"for testOne.s0.x.x"
 
 "for testOne.s0.x.x":                             ; preds = %"for testOne.s0.x.x", %"for testOne.s0.x.x.preheader"
-  %.phi = phi i16* [ %2, %"for testOne.s0.x.x.preheader" ], [ %.inc, %"for testOne.s0.x.x" ]
+  %.phi = phi ptr [ %2, %"for testOne.s0.x.x.preheader" ], [ %.inc, %"for testOne.s0.x.x" ]
   %testOne.s0.x.x = phi i32 [ 0, %"for testOne.s0.x.x.preheader" ], [ %38, %"for testOne.s0.x.x" ]
   %3 = shl nsw i32 %testOne.s0.x.x, 6
   %4 = add nsw i32 %3, %testOne.min.0
   %5 = shl nsw i32 %4, 1
   %6 = sub nsw i32 %5, %inputOne.min.0
-  %7 = getelementptr inbounds i8, i8* %inputOne.host, i32 %6
-  %8 = bitcast i8* %7 to <64 x i8>*
-  %9 = load <64 x i8>, <64 x i8>* %8, align 1, !tbaa !5
+  %7 = getelementptr inbounds i8, ptr %inputOne.host, i32 %6
+  %8 = bitcast ptr %7 to ptr
+  %9 = load <64 x i8>, ptr %8, align 1, !tbaa !5
   %10 = add nsw i32 %6, 64
-  %11 = getelementptr inbounds i8, i8* %inputOne.host, i32 %10
-  %12 = bitcast i8* %11 to <64 x i8>*
-  %13 = load <64 x i8>, <64 x i8>* %12, align 1, !tbaa !5
+  %11 = getelementptr inbounds i8, ptr %inputOne.host, i32 %10
+  %12 = bitcast ptr %11 to ptr
+  %13 = load <64 x i8>, ptr %12, align 1, !tbaa !5
   %14 = shufflevector <64 x i8> %9, <64 x i8> %13, <64 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30, i32 32, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62, i32 64, i32 66, i32 68, i32 70, i32 72, i32 74, i32 76, i32 78, i32 80, i32 82, i32 84, i32 86, i32 88, i32 90, i32 92, i32 94, i32 96, i32 98, i32 100, i32 102, i32 104, i32 106, i32 108, i32 110, i32 112, i32 114, i32 116, i32 118, i32 120, i32 122, i32 124, i32 126>
   %15 = shufflevector <64 x i8> %9, <64 x i8> %13, <64 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31, i32 33, i32 35, i32 37, i32 39, i32 41, i32 43, i32 45, i32 47, i32 49, i32 51, i32 53, i32 55, i32 57, i32 59, i32 61, i32 63, i32 65, i32 67, i32 69, i32 71, i32 73, i32 75, i32 77, i32 79, i32 81, i32 83, i32 85, i32 87, i32 89, i32 91, i32 93, i32 95, i32 97, i32 99, i32 101, i32 103, i32 105, i32 107, i32 109, i32 111, i32 113, i32 115, i32 117, i32 119, i32 121, i32 123, i32 125, i32 127>
   %16 = shufflevector <64 x i8> %14, <64 x i8> %15, <128 x i32> <i32 0, i32 64, i32 1, i32 65, i32 2, i32 66, i32 3, i32 67, i32 4, i32 68, i32 5, i32 69, i32 6, i32 70, i32 7, i32 71, i32 8, i32 72, i32 9, i32 73, i32 10, i32 74, i32 11, i32 75, i32 12, i32 76, i32 13, i32 77, i32 14, i32 78, i32 15, i32 79, i32 16, i32 80, i32 17, i32 81, i32 18, i32 82, i32 19, i32 83, i32 20, i32 84, i32 21, i32 85, i32 22, i32 86, i32 23, i32 87, i32 24, i32 88, i32 25, i32 89, i32 26, i32 90, i32 27, i32 91, i32 28, i32 92, i32 29, i32 93, i32 30, i32 94, i32 31, i32 95, i32 32, i32 96, i32 33, i32 97, i32 34, i32 98, i32 35, i32 99, i32 36, i32 100, i32 37, i32 101, i32 38, i32 102, i32 39, i32 103, i32 40, i32 104, i32 41, i32 105, i32 42, i32 106, i32 43, i32 107, i32 44, i32 108, i32 45, i32 109, i32 46, i32 110, i32 47, i32 111, i32 48, i32 112, i32 49, i32 113, i32 50, i32 114, i32 51, i32 115, i32 52, i32 116, i32 53, i32 117, i32 54, i32 118, i32 55, i32 119, i32 56, i32 120, i32 57, i32 121, i32 58, i32 122, i32 59, i32 123, i32 60, i32 124, i32 61, i32 125, i32 62, i32 126, i32 63, i32 127>
   %17 = sub nsw i32 %5, %inputTwo.min.0
-  %18 = getelementptr inbounds i8, i8* %inputTwo.host, i32 %17
-  %19 = bitcast i8* %18 to <64 x i8>*
-  %20 = load <64 x i8>, <64 x i8>* %19, align 1, !tbaa !8
+  %18 = getelementptr inbounds i8, ptr %inputTwo.host, i32 %17
+  %19 = bitcast ptr %18 to ptr
+  %20 = load <64 x i8>, ptr %19, align 1, !tbaa !8
   %21 = add nsw i32 %17, 64
-  %22 = getelementptr inbounds i8, i8* %inputTwo.host, i32 %21
-  %23 = bitcast i8* %22 to <64 x i8>*
-  %24 = load <64 x i8>, <64 x i8>* %23, align 1, !tbaa !8
+  %22 = getelementptr inbounds i8, ptr %inputTwo.host, i32 %21
+  %23 = bitcast ptr %22 to ptr
+  %24 = load <64 x i8>, ptr %23, align 1, !tbaa !8
   %25 = shufflevector <64 x i8> %20, <64 x i8> %24, <64 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30, i32 32, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62, i32 64, i32 66, i32 68, i32 70, i32 72, i32 74, i32 76, i32 78, i32 80, i32 82, i32 84, i32 86, i32 88, i32 90, i32 92, i32 94, i32 96, i32 98, i32 100, i32 102, i32 104, i32 106, i32 108, i32 110, i32 112, i32 114, i32 116, i32 118, i32 120, i32 122, i32 124, i32 126>
   %26 = shufflevector <64 x i8> %20, <64 x i8> %24, <64 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31, i32 33, i32 35, i32 37, i32 39, i32 41, i32 43, i32 45, i32 47, i32 49, i32 51, i32 53, i32 55, i32 57, i32 59, i32 61, i32 63, i32 65, i32 67, i32 69, i32 71, i32 73, i32 75, i32 77, i32 79, i32 81, i32 83, i32 85, i32 87, i32 89, i32 91, i32 93, i32 95, i32 97, i32 99, i32 101, i32 103, i32 105, i32 107, i32 109, i32 111, i32 113, i32 115, i32 117, i32 119, i32 121, i32 123, i32 125, i32 127>
   %27 = shufflevector <64 x i8> %25, <64 x i8> %26, <128 x i32> <i32 0, i32 64, i32 1, i32 65, i32 2, i32 66, i32 3, i32 67, i32 4, i32 68, i32 5, i32 69, i32 6, i32 70, i32 7, i32 71, i32 8, i32 72, i32 9, i32 73, i32 10, i32 74, i32 11, i32 75, i32 12, i32 76, i32 13, i32 77, i32 14, i32 78, i32 15, i32 79, i32 16, i32 80, i32 17, i32 81, i32 18, i32 82, i32 19, i32 83, i32 20, i32 84, i32 21, i32 85, i32 22, i32 86, i32 23, i32 87, i32 24, i32 88, i32 25, i32 89, i32 26, i32 90, i32 27, i32 91, i32 28, i32 92, i32 29, i32 93, i32 30, i32 94, i32 31, i32 95, i32 32, i32 96, i32 33, i32 97, i32 34, i32 98, i32 35, i32 99, i32 36, i32 100, i32 37, i32 101, i32 38, i32 102, i32 39, i32 103, i32 40, i32 104, i32 41, i32 105, i32 42, i32 106, i32 43, i32 107, i32 44, i32 108, i32 45, i32 109, i32 46, i32 110, i32 47, i32 111, i32 48, i32 112, i32 49, i32 113, i32 50, i32 114, i32 51, i32 115, i32 52, i32 116, i32 53, i32 117, i32 54, i32 118, i32 55, i32 119, i32 56, i32 120, i32 57, i32 121, i32 58, i32 122, i32 59, i32 123, i32 60, i32 124, i32 61, i32 125, i32 62, i32 126, i32 63, i32 127>
@@ -64,16 +64,16 @@ entry:
   %30 = tail call <32 x i32> @llvm.hexagon.V6.vmpabuuv(<32 x i32> %28, <32 x i32> %29)
   %31 = bitcast <32 x i32> %30 to <64 x i16>
   %32 = shufflevector <64 x i16> %31, <64 x i16> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-  %33 = bitcast i16* %.phi to <32 x i16>*
-  store <32 x i16> %32, <32 x i16>* %33, align 2, !tbaa !10
+  %33 = bitcast ptr %.phi to ptr
+  store <32 x i16> %32, ptr %33, align 2, !tbaa !10
   %34 = shufflevector <64 x i16> %31, <64 x i16> undef, <32 x i32> <i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
   %35 = or i32 %3, 32
-  %36 = getelementptr inbounds i16, i16* %2, i32 %35
-  %37 = bitcast i16* %36 to <32 x i16>*
-  store <32 x i16> %34, <32 x i16>* %37, align 2, !tbaa !10
+  %36 = getelementptr inbounds i16, ptr %2, i32 %35
+  %37 = bitcast ptr %36 to ptr
+  store <32 x i16> %34, ptr %37, align 2, !tbaa !10
   %38 = add nuw nsw i32 %testOne.s0.x.x, 1
   %39 = icmp eq i32 %38, %0
-  %.inc = getelementptr i16, i16* %.phi, i32 64
+  %.inc = getelementptr i16, ptr %.phi, i32 64
   br i1 %39, label %"end for testOne.s0.x.x", label %"for testOne.s0.x.x"
 
 "end for testOne.s0.x.x":                         ; preds = %"for testOne.s0.x.x", %entry
@@ -87,25 +87,25 @@ entry:
   %44 = shl nsw i32 %43, 1
   %45 = sub nsw i32 %44, %inputOne.min.0
   %46 = add nsw i32 %45, -128
-  %47 = getelementptr inbounds i8, i8* %inputOne.host, i32 %46
-  %48 = bitcast i8* %47 to <64 x i8>*
-  %49 = load <64 x i8>, <64 x i8>* %48, align 1
+  %47 = getelementptr inbounds i8, ptr %inputOne.host, i32 %46
+  %48 = bitcast ptr %47 to ptr
+  %49 = load <64 x i8>, ptr %48, align 1
   %50 = add nsw i32 %45, -64
-  %51 = getelementptr inbounds i8, i8* %inputOne.host, i32 %50
-  %52 = bitcast i8* %51 to <64 x i8>*
-  %53 = load <64 x i8>, <64 x i8>* %52, align 1
+  %51 = getelementptr inbounds i8, ptr %inputOne.host, i32 %50
+  %52 = bitcast ptr %51 to ptr
+  %53 = load <64 x i8>, ptr %52, align 1
   %54 = shufflevector <64 x i8> %49, <64 x i8> %53, <64 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30, i32 32, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62, i32 64, i32 66, i32 68, i32 70, i32 72, i32 74, i32 76, i32 78, i32 80, i32 82, i32 84, i32 86, i32 88, i32 90, i32 92, i32 94, i32 96, i32 98, i32 100, i32 102, i32 104, i32 106, i32 108, i32 110, i32 112, i32 114, i32 116, i32 118, i32 120, i32 122, i32 124, i32 126>
   %55 = shufflevector <64 x i8> %49, <64 x i8> %53, <64 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31, i32 33, i32 35, i32 37, i32 39, i32 41, i32 43, i32 45, i32 47, i32 49, i32 51, i32 53, i32 55, i32 57, i32 59, i32 61, i32 63, i32 65, i32 67, i32 69, i32 71, i32 73, i32 75, i32 77, i32 79, i32 81, i32 83, i32 85, i32 87, i32 89, i32 91, i32 93, i32 95, i32 97, i32 99, i32 101, i32 103, i32 105, i32 107, i32 109, i32 111, i32 113, i32 115, i32 117, i32 119, i32 121, i32 123, i32 125, i32 127>
   %56 = shufflevector <64 x i8> %54, <64 x i8> %55, <128 x i32> <i32 0, i32 64, i32 1, i32 65, i32 2, i32 66, i32 3, i32 67, i32 4, i32 68, i32 5, i32 69, i32 6, i32 70, i32 7, i32 71, i32 8, i32 72, i32 9, i32 73, i32 10, i32 74, i32 11, i32 75, i32 12, i32 76, i32 13, i32 77, i32 14, i32 78, i32 15, i32 79, i32 16, i32 80, i32 17, i32 81, i32 18, i32 82, i32 19, i32 83, i32 20, i32 84, i32 21, i32 85, i32 22, i32 86, i32 23, i32 87, i32 24, i32 88, i32 25, i32 89, i32 26, i32 90, i32 27, i32 91, i32 28, i32 92, i32 29, i32 93, i32 30, i32 94, i32 31, i32 95, i32 32, i32 96, i32 33, i32 97, i32 34, i32 98, i32 35, i32 99, i32 36, i32 100, i32 37, i32 101, i32 38, i32 102, i32 39, i32 103, i32 40, i32 104, i32 41, i32 105, i32 42, i32 106, i32 43, i32 107, i32 44, i32 108, i32 45, i32 109, i32 46, i32 110, i32 47, i32 111, i32 48, i32 112, i32 49, i32 113, i32 50, i32 114, i32 51, i32 115, i32 52, i32 116, i32 53, i32 117, i32 54, i32 118, i32 55, i32 119, i32 56, i32 120, i32 57, i32 121, i32 58, i32 122, i32 59, i32 123, i32 60, i32 124, i32 61, i32 125, i32 62, i32 126, i32 63, i32 127>
   %57 = sub nsw i32 %44, %inputTwo.min.0
   %58 = add nsw i32 %57, -128
-  %59 = getelementptr inbounds i8, i8* %inputTwo.host, i32 %58
-  %60 = bitcast i8* %59 to <64 x i8>*
-  %61 = load <64 x i8>, <64 x i8>* %60, align 1
+  %59 = getelementptr inbounds i8, ptr %inputTwo.host, i32 %58
+  %60 = bitcast ptr %59 to ptr
+  %61 = load <64 x i8>, ptr %60, align 1
   %62 = add nsw i32 %57, -64
-  %63 = getelementptr inbounds i8, i8* %inputTwo.host, i32 %62
-  %64 = bitcast i8* %63 to <64 x i8>*
-  %65 = load <64 x i8>, <64 x i8>* %64, align 1
+  %63 = getelementptr inbounds i8, ptr %inputTwo.host, i32 %62
+  %64 = bitcast ptr %63 to ptr
+  %65 = load <64 x i8>, ptr %64, align 1
   %66 = shufflevector <64 x i8> %61, <64 x i8> %65, <64 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30, i32 32, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62, i32 64, i32 66, i32 68, i32 70, i32 72, i32 74, i32 76, i32 78, i32 80, i32 82, i32 84, i32 86, i32 88, i32 90, i32 92, i32 94, i32 96, i32 98, i32 100, i32 102, i32 104, i32 106, i32 108, i32 110, i32 112, i32 114, i32 116, i32 118, i32 120, i32 122, i32 124, i32 126>
   %67 = shufflevector <64 x i8> %61, <64 x i8> %65, <64 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31, i32 33, i32 35, i32 37, i32 39, i32 41, i32 43, i32 45, i32 47, i32 49, i32 51, i32 53, i32 55, i32 57, i32 59, i32 61, i32 63, i32 65, i32 67, i32 69, i32 71, i32 73, i32 75, i32 77, i32 79, i32 81, i32 83, i32 85, i32 87, i32 89, i32 91, i32 93, i32 95, i32 97, i32 99, i32 101, i32 103, i32 105, i32 107, i32 109, i32 111, i32 113, i32 115, i32 117, i32 119, i32 121, i32 123, i32 125, i32 127>
   %68 = shufflevector <64 x i8> %66, <64 x i8> %67, <128 x i32> <i32 0, i32 64, i32 1, i32 65, i32 2, i32 66, i32 3, i32 67, i32 4, i32 68, i32 5, i32 69, i32 6, i32 70, i32 7, i32 71, i32 8, i32 72, i32 9, i32 73, i32 10, i32 74, i32 11, i32 75, i32 12, i32 76, i32 13, i32 77, i32 14, i32 78, i32 15, i32 79, i32 16, i32 80, i32 17, i32 81, i32 18, i32 82, i32 19, i32 83, i32 20, i32 84, i32 21, i32 85, i32 22, i32 86, i32 23, i32 87, i32 24, i32 88, i32 25, i32 89, i32 26, i32 90, i32 27, i32 91, i32 28, i32 92, i32 29, i32 93, i32 30, i32 94, i32 31, i32 95, i32 32, i32 96, i32 33, i32 97, i32 34, i32 98, i32 35, i32 99, i32 36, i32 100, i32 37, i32 101, i32 38, i32 102, i32 39, i32 103, i32 40, i32 104, i32 41, i32 105, i32 42, i32 106, i32 43, i32 107, i32 44, i32 108, i32 45, i32 109, i32 46, i32 110, i32 47, i32 111, i32 48, i32 112, i32 49, i32 113, i32 50, i32 114, i32 51, i32 115, i32 52, i32 116, i32 53, i32 117, i32 54, i32 118, i32 55, i32 119, i32 56, i32 120, i32 57, i32 121, i32 58, i32 122, i32 59, i32 123, i32 60, i32 124, i32 61, i32 125, i32 62, i32 126, i32 63, i32 127>
@@ -114,16 +114,16 @@ entry:
   %71 = tail call <32 x i32> @llvm.hexagon.V6.vmpabuuv(<32 x i32> %69, <32 x i32> %70)
   %72 = bitcast <32 x i32> %71 to <64 x i16>
   %73 = add nsw i32 %testOne.extent.0, -64
-  %74 = bitcast i8* %testOne.host to i16*
-  %75 = getelementptr inbounds i16, i16* %74, i32 %73
-  %76 = bitcast i16* %75 to <32 x i16>*
+  %74 = bitcast ptr %testOne.host to ptr
+  %75 = getelementptr inbounds i16, ptr %74, i32 %73
+  %76 = bitcast ptr %75 to ptr
   %77 = add nsw i32 %testOne.extent.0, -32
-  %78 = getelementptr inbounds i16, i16* %74, i32 %77
+  %78 = getelementptr inbounds i16, ptr %74, i32 %77
   %79 = shufflevector <64 x i16> %72, <64 x i16> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   %80 = shufflevector <64 x i16> %72, <64 x i16> undef, <32 x i32> <i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
-  %81 = bitcast i16* %78 to <32 x i16>*
-  store <32 x i16> %79, <32 x i16>* %76, align 2, !tbaa !10
-  store <32 x i16> %80, <32 x i16>* %81, align 2, !tbaa !10
+  %81 = bitcast ptr %78 to ptr
+  store <32 x i16> %79, ptr %76, align 2, !tbaa !10
+  store <32 x i16> %80, ptr %81, align 2, !tbaa !10
   br label %destructor_block
 
 destructor_block:                                 ; preds = %"for testOne.s0.x.x44.preheader", %"end for testOne.s0.x.x"

diff --git a/llvm/test/CodeGen/Lanai/codemodel.ll b/llvm/test/CodeGen/Lanai/codemodel.ll
index 205f08d4d1478..75559c99bb80e 100644
--- a/llvm/test/CodeGen/Lanai/codemodel.ll
+++ b/llvm/test/CodeGen/Lanai/codemodel.ll
@@ -16,7 +16,7 @@ entry:
 ; CHECK: mov hi(data), %r[[REGISTER:[0-9]+]]
 ; CHECK: or %r[[REGISTER]], lo(data), %r[[REGISTER]]
 ; CHECK: ld 0[%r[[REGISTER]]], %rv
-	%0 = load i32, i32* getelementptr ([0 x i32], [0 x i32]* @data, i64 0, i64 0), align 4		; <i32> [#uses=1]
+	%0 = load i32, ptr @data, align 4		; <i32> [#uses=1]
 	ret i32 %0
 }
 
@@ -29,11 +29,11 @@ entry:
 ; CHECK: mov hi(data), %r[[REGISTER:[0-9]+]]
 ; CHECK: or %r[[REGISTER]], lo(data), %r[[REGISTER]]
 ; CHECK: ld 40[%r[[REGISTER]]], %rv
-	%0 = load i32, i32* getelementptr ([0 x i32], [0 x i32]* @data, i32 0, i64 10), align 4		; <i32> [#uses=1]
+	%0 = load i32, ptr getelementptr ([0 x i32], ptr @data, i32 0, i64 10), align 4		; <i32> [#uses=1]
 	ret i32 %0
 }
 
- at y = local_unnamed_addr global i32* null, section ".ldata,block", align 8
+ at y = local_unnamed_addr global ptr null, section ".ldata,block", align 8
 
 define i32 @foo2() nounwind readonly {
 entry:
@@ -43,7 +43,7 @@ entry:
 ; CHECK-LABEL:  foo2:
 ; CHECK: mov hi(y), %r[[REGISTER:[0-9]+]]
 ; CHECK: or %r[[REGISTER]], lo(y), %r[[REGISTER]]
-  %0 = load i32*, i32** @y, align 8
-  %1 = load i32, i32* %0, align 4
+  %0 = load ptr, ptr @y, align 8
+  %1 = load i32, ptr %0, align 4
   ret i32 %1
 }

diff --git a/llvm/test/CodeGen/Lanai/inlineasm-output-template.ll b/llvm/test/CodeGen/Lanai/inlineasm-output-template.ll
index 9dc875c9b8dfe..d588ea13e75cc 100644
--- a/llvm/test/CodeGen/Lanai/inlineasm-output-template.ll
+++ b/llvm/test/CodeGen/Lanai/inlineasm-output-template.ll
@@ -13,7 +13,7 @@ define dso_local i32 @test_inlineasm_c_output_template0() {
 ; CHECK: !TEST baz
 @baz = internal global i32 0, align 4
 define dso_local i32 @test_inlineasm_c_output_template1() {
-  tail call void asm sideeffect "!TEST ${0:c}", "i"(i32* nonnull @baz)
+  tail call void asm sideeffect "!TEST ${0:c}", "i"(ptr nonnull @baz)
   ret i32 42
 }
 

diff --git a/llvm/test/CodeGen/Lanai/lanai-misched-trivial-disjoint.ll b/llvm/test/CodeGen/Lanai/lanai-misched-trivial-disjoint.ll
index 2ff11e650453c..49008f8449716 100644
--- a/llvm/test/CodeGen/Lanai/lanai-misched-trivial-disjoint.ll
+++ b/llvm/test/CodeGen/Lanai/lanai-misched-trivial-disjoint.ll
@@ -5,27 +5,27 @@
 ; are trivially disjoint.
 
 ; Function Attrs: norecurse nounwind uwtable
-define i32 @foo(i8* inreg nocapture %x) {
+define i32 @foo(ptr inreg nocapture %x) {
 entry:
-  %0 = bitcast i8* %x to i32*
-  store i32 1, i32* %0, align 4
-  %arrayidx1 = getelementptr inbounds i8, i8* %x, i32 4
-  %1 = bitcast i8* %arrayidx1 to i32*
-  store i32 2, i32* %1, align 4
-  %arrayidx2 = getelementptr inbounds i8, i8* %x, i32 12
-  %2 = bitcast i8* %arrayidx2 to i32*
-  %3 = load i32, i32* %2, align 4
-  %arrayidx3 = getelementptr inbounds i8, i8* %x, i32 10
-  %4 = bitcast i8* %arrayidx3 to i16*
-  store i16 3, i16* %4, align 2
-  %5 = bitcast i8* %arrayidx2 to i16*
-  store i16 4, i16* %5, align 2
-  %arrayidx5 = getelementptr inbounds i8, i8* %x, i32 14
-  store i8 5, i8* %arrayidx5, align 1
-  %arrayidx6 = getelementptr inbounds i8, i8* %x, i32 15
-  store i8 6, i8* %arrayidx6, align 1
-  %arrayidx7 = getelementptr inbounds i8, i8* %x, i32 16
-  store i8 7, i8* %arrayidx7, align 1
+  %0 = bitcast ptr %x to ptr
+  store i32 1, ptr %0, align 4
+  %arrayidx1 = getelementptr inbounds i8, ptr %x, i32 4
+  %1 = bitcast ptr %arrayidx1 to ptr
+  store i32 2, ptr %1, align 4
+  %arrayidx2 = getelementptr inbounds i8, ptr %x, i32 12
+  %2 = bitcast ptr %arrayidx2 to ptr
+  %3 = load i32, ptr %2, align 4
+  %arrayidx3 = getelementptr inbounds i8, ptr %x, i32 10
+  %4 = bitcast ptr %arrayidx3 to ptr
+  store i16 3, ptr %4, align 2
+  %5 = bitcast ptr %arrayidx2 to ptr
+  store i16 4, ptr %5, align 2
+  %arrayidx5 = getelementptr inbounds i8, ptr %x, i32 14
+  store i8 5, ptr %arrayidx5, align 1
+  %arrayidx6 = getelementptr inbounds i8, ptr %x, i32 15
+  store i8 6, ptr %arrayidx6, align 1
+  %arrayidx7 = getelementptr inbounds i8, ptr %x, i32 16
+  store i8 7, ptr %arrayidx7, align 1
   ret i32 %3
 }
 

diff --git a/llvm/test/CodeGen/Lanai/mem_alu_combiner.ll b/llvm/test/CodeGen/Lanai/mem_alu_combiner.ll
index 2dbe1268958d4..a36742b50746c 100644
--- a/llvm/test/CodeGen/Lanai/mem_alu_combiner.ll
+++ b/llvm/test/CodeGen/Lanai/mem_alu_combiner.ll
@@ -7,7 +7,7 @@
 ; CHECK-DIS-LABEL: sum,
 ; CHECK-DIS-NOT: ++],
 
-define i32 @sum(i32* inreg nocapture readonly %data, i32 inreg %n) {
+define i32 @sum(ptr inreg nocapture readonly %data, i32 inreg %n) {
 entry:
   %cmp6 = icmp sgt i32 %n, 0
   br i1 %cmp6, label %for.body.preheader, label %for.cond.cleanup
@@ -26,8 +26,8 @@ for.cond.cleanup:                                 ; preds = %for.cond.cleanup.lo
 for.body:                                         ; preds = %for.body.preheader, %for.body
   %i.08 = phi i32 [ %inc, %for.body ], [ 0, %for.body.preheader ]
   %sum_.07 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ]
-  %arrayidx = getelementptr inbounds i32, i32* %data, i32 %i.08
-  %0 = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %data, i32 %i.08
+  %0 = load i32, ptr %arrayidx, align 4
   %add = add nsw i32 %0, %sum_.07
   %inc = add nuw nsw i32 %i.08, 1
   %exitcond = icmp eq i32 %inc, %n

diff --git a/llvm/test/CodeGen/Lanai/peephole-compare.mir b/llvm/test/CodeGen/Lanai/peephole-compare.mir
index 63a30448198b7..10735170ca34c 100644
--- a/llvm/test/CodeGen/Lanai/peephole-compare.mir
+++ b/llvm/test/CodeGen/Lanai/peephole-compare.mir
@@ -130,14 +130,14 @@
 
   define void @testBB() {
   entry:
-    %0 = load i32, i32* @a, align 4, !tbaa !0
-    %1 = load i32, i32* @b, align 4, !tbaa !0
+    %0 = load i32, ptr @a, align 4, !tbaa !0
+    %1 = load i32, ptr @b, align 4, !tbaa !0
     %sub.i = sub i32 %1, %0
     %tobool = icmp sgt i32 %sub.i, -1
     br i1 %tobool, label %if.end, label %if.then
 
   if.then:                                          ; preds = %entry
-    %call1 = tail call i32 bitcast (i32 (...)* @g to i32 ()*)()
+    %call1 = tail call i32 @g()
     br label %while.body
 
   while.body:                                       ; preds = %while.body, %if.then
@@ -148,7 +148,7 @@
     br i1 %cmp.i, label %if.then4, label %if.end7
 
   if.then4:                                         ; preds = %if.end
-    %call5 = tail call i32 bitcast (i32 (...)* @g to i32 ()*)()
+    %call5 = tail call i32 @g()
     br label %while.body6
 
   while.body6:                                      ; preds = %while.body6, %if.then4
@@ -161,7 +161,7 @@
   declare i32 @g(...)
 
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #0
+  declare void @llvm.stackprotector(ptr, ptr) #0
 
   attributes #0 = { nounwind }
 

diff --git a/llvm/test/CodeGen/Lanai/set_and_hi.ll b/llvm/test/CodeGen/Lanai/set_and_hi.ll
index bfce094050cbd..2eee7517318ed 100644
--- a/llvm/test/CodeGen/Lanai/set_and_hi.ll
+++ b/llvm/test/CodeGen/Lanai/set_and_hi.ll
@@ -10,6 +10,6 @@ target triple = "lanai"
 ; CHECK-LABEL: setandhi:
 ; CHECK: mov 0xfffffe4a, %r{{[0-9]+}}
 define void @setandhi() #0 {
-  store volatile i32 -438, i32* @x, align 4
+  store volatile i32 -438, ptr @x, align 4
   ret void
 }

diff --git a/llvm/test/CodeGen/Lanai/sub-cmp-peephole.ll b/llvm/test/CodeGen/Lanai/sub-cmp-peephole.ll
index 84d23e4e28da7..d79db0e6fbaa9 100644
--- a/llvm/test/CodeGen/Lanai/sub-cmp-peephole.ll
+++ b/llvm/test/CodeGen/Lanai/sub-cmp-peephole.ll
@@ -129,7 +129,7 @@ define i32 @cmp_ult0(i32 inreg %a, i32 inreg %b, i32 inreg %x, i32 inreg %y) {
 ; CHECK-NEXT:    bt exit
 ; CHECK-NEXT:    nop
 entry:
-  %load = load i32, i32* @t, align 4
+  %load = load i32, ptr @t, align 4
   %sub = sub i32 %load, 17
   %cmp = icmp ult i32 %sub, 0
   br i1 %cmp, label %if.then, label %if.else
@@ -169,7 +169,7 @@ define i32 @cmp_gt0(i32 inreg %a, i32 inreg %b, i32 inreg %x, i32 inreg %y) {
 ; CHECK-NEXT:    bt exit
 ; CHECK-NEXT:    nop
 entry:
-  %load = load i32, i32* @t, align 4
+  %load = load i32, ptr @t, align 4
   %sub = sub i32 %load, 17
   %cmp = icmp sgt i32 %sub, 0
   br i1 %cmp, label %if.then, label %if.else

diff --git a/llvm/test/CodeGen/Lanai/subword.ll b/llvm/test/CodeGen/Lanai/subword.ll
index c0e1eaf6ad361..6da3b5dfefecf 100644
--- a/llvm/test/CodeGen/Lanai/subword.ll
+++ b/llvm/test/CodeGen/Lanai/subword.ll
@@ -4,16 +4,16 @@
 
 %struct.X = type { i16, i16 }
 
-define void @f(%struct.X* inreg nocapture %c) #0 {
+define void @f(ptr inreg nocapture %c) #0 {
 entry:
-  %a = getelementptr inbounds %struct.X, %struct.X* %c, i32 0, i32 0
-  %0 = load i16, i16* %a, align 2
+  %a = getelementptr inbounds %struct.X, ptr %c, i32 0, i32 0
+  %0 = load i16, ptr %a, align 2
   %inc = add i16 %0, 1
-  store i16 %inc, i16* %a, align 2
-  %b = getelementptr inbounds %struct.X, %struct.X* %c, i32 0, i32 1
-  %1 = load i16, i16* %b, align 2
+  store i16 %inc, ptr %a, align 2
+  %b = getelementptr inbounds %struct.X, ptr %c, i32 0, i32 1
+  %1 = load i16, ptr %b, align 2
   %dec = add i16 %1, -1
-  store i16 %dec, i16* %b, align 2
+  store i16 %dec, ptr %b, align 2
   ret void
 }
 

diff --git a/llvm/test/CodeGen/LoongArch/frame.ll b/llvm/test/CodeGen/LoongArch/frame.ll
index 2a9700522219c..8d3133316c43d 100644
--- a/llvm/test/CodeGen/LoongArch/frame.ll
+++ b/llvm/test/CodeGen/LoongArch/frame.ll
@@ -3,7 +3,7 @@
 
 %struct.key_t = type { i32, [16 x i8] }
 
-declare void @llvm.memset.p0i8.i64(ptr, i8, i64, i1)
+declare void @llvm.memset.p0.i64(ptr, i8, i64, i1)
 declare void @test1(ptr)
 
 define i32 @test() nounwind {
@@ -21,7 +21,7 @@ define i32 @test() nounwind {
 ; CHECK-NEXT:    addi.d $sp, $sp, 32
 ; CHECK-NEXT:    ret
   %key = alloca %struct.key_t, align 4
-  call void @llvm.memset.p0i8.i64(ptr %key, i8 0, i64 20, i1 false)
+  call void @llvm.memset.p0.i64(ptr %key, i8 0, i64 20, i1 false)
   %1 = getelementptr inbounds %struct.key_t, ptr %key, i64 0, i32 1, i64 0
   call void @test1(ptr %1)
   ret i32 0

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ld-invalid-imm.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ld-invalid-imm.ll
index 20dd8a45d7f02..27c690c91aecb 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ld-invalid-imm.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ld-invalid-imm.ll
@@ -1,17 +1,17 @@
 ; RUN: not llc --mtriple=loongarch64 --mattr=+lasx < %s 2>&1 | FileCheck %s
 
-declare <32 x i8> @llvm.loongarch.lasx.xvld(i8*, i32)
+declare <32 x i8> @llvm.loongarch.lasx.xvld(ptr, i32)
 
-define <32 x i8> @lasx_xvld_lo(i8* %p) nounwind {
+define <32 x i8> @lasx_xvld_lo(ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lasx.xvld: argument out of range
 entry:
-  %res = call <32 x i8> @llvm.loongarch.lasx.xvld(i8* %p, i32 -2049)
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvld(ptr %p, i32 -2049)
   ret <32 x i8> %res
 }
 
-define <32 x i8> @lasx_xvld_hi(i8* %p) nounwind {
+define <32 x i8> @lasx_xvld_hi(ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lasx.xvld: argument out of range
 entry:
-  %res = call <32 x i8> @llvm.loongarch.lasx.xvld(i8* %p, i32 2048)
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvld(ptr %p, i32 2048)
   ret <32 x i8> %res
 }

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ld-non-imm.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ld-non-imm.ll
index b23436a448323..1d8d5c764ce8b 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ld-non-imm.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ld-non-imm.ll
@@ -1,10 +1,10 @@
 ; RUN: not llc --mtriple=loongarch64 --mattr=+lasx < %s 2>&1 | FileCheck %s
 
-declare <32 x i8> @llvm.loongarch.lasx.xvld(i8*, i32)
+declare <32 x i8> @llvm.loongarch.lasx.xvld(ptr, i32)
 
-define <32 x i8> @lasx_xvld(i8* %p, i32 %a) nounwind {
+define <32 x i8> @lasx_xvld(ptr %p, i32 %a) nounwind {
 ; CHECK: immarg operand has non-immediate parameter
 entry:
-  %res = call <32 x i8> @llvm.loongarch.lasx.xvld(i8* %p, i32 %a)
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvld(ptr %p, i32 %a)
   ret <32 x i8> %res
 }

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ld.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ld.ll
index 5ffc629db4668..e8b0aeb9bbcf8 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ld.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ld.ll
@@ -1,26 +1,26 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
 
-declare <32 x i8> @llvm.loongarch.lasx.xvld(i8*, i32)
+declare <32 x i8> @llvm.loongarch.lasx.xvld(ptr, i32)
 
-define <32 x i8> @lasx_xvld(i8* %p) nounwind {
+define <32 x i8> @lasx_xvld(ptr %p) nounwind {
 ; CHECK-LABEL: lasx_xvld:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xvld $xr0, $a0, 1
 ; CHECK-NEXT:    ret
 entry:
-  %res = call <32 x i8> @llvm.loongarch.lasx.xvld(i8* %p, i32 1)
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvld(ptr %p, i32 1)
   ret <32 x i8> %res
 }
 
-declare <32 x i8> @llvm.loongarch.lasx.xvldx(i8*, i64)
+declare <32 x i8> @llvm.loongarch.lasx.xvldx(ptr, i64)
 
-define <32 x i8> @lasx_xvldx(i8* %p, i64 %b) nounwind {
+define <32 x i8> @lasx_xvldx(ptr %p, i64 %b) nounwind {
 ; CHECK-LABEL: lasx_xvldx:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xvldx $xr0, $a0, $a1
 ; CHECK-NEXT:    ret
 entry:
-  %res = call <32 x i8> @llvm.loongarch.lasx.xvldx(i8* %p, i64 %b)
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvldx(ptr %p, i64 %b)
   ret <32 x i8> %res
 }

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ldrepl-invalid-imm.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ldrepl-invalid-imm.ll
index cb62a839985a3..6fe6de82e1c0f 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ldrepl-invalid-imm.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ldrepl-invalid-imm.ll
@@ -1,65 +1,65 @@
 ; RUN: not llc --mtriple=loongarch64 --mattr=+lasx < %s 2>&1 | FileCheck %s
 
-declare <32 x i8> @llvm.loongarch.lasx.xvldrepl.b(i8*, i32)
+declare <32 x i8> @llvm.loongarch.lasx.xvldrepl.b(ptr, i32)
 
-define <32 x i8> @lasx_xvldrepl_b_lo(i8* %p) nounwind {
+define <32 x i8> @lasx_xvldrepl_b_lo(ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lasx.xvldrepl.b: argument out of range
 entry:
-  %res = call <32 x i8> @llvm.loongarch.lasx.xvldrepl.b(i8* %p, i32 -2049)
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvldrepl.b(ptr %p, i32 -2049)
   ret <32 x i8> %res
 }
 
-define <32 x i8> @lasx_xvldrepl_b_hi(i8* %p) nounwind {
+define <32 x i8> @lasx_xvldrepl_b_hi(ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lasx.xvldrepl.b: argument out of range
 entry:
-  %res = call <32 x i8> @llvm.loongarch.lasx.xvldrepl.b(i8* %p, i32 2048)
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvldrepl.b(ptr %p, i32 2048)
   ret <32 x i8> %res
 }
 
-declare <16 x i16> @llvm.loongarch.lasx.xvldrepl.h(i8*, i32)
+declare <16 x i16> @llvm.loongarch.lasx.xvldrepl.h(ptr, i32)
 
-define <16 x i16> @lasx_xvldrepl_h_lo(i8* %p) nounwind {
+define <16 x i16> @lasx_xvldrepl_h_lo(ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lasx.xvldrepl.h: argument out of range or not a multiple of 2.
 entry:
-  %res = call <16 x i16> @llvm.loongarch.lasx.xvldrepl.h(i8* %p, i32 -2050)
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvldrepl.h(ptr %p, i32 -2050)
   ret <16 x i16> %res
 }
 
-define <16 x i16> @lasx_xvldrepl_h_hi(i8* %p) nounwind {
+define <16 x i16> @lasx_xvldrepl_h_hi(ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lasx.xvldrepl.h: argument out of range or not a multiple of 2.
 entry:
-  %res = call <16 x i16> @llvm.loongarch.lasx.xvldrepl.h(i8* %p, i32 2048)
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvldrepl.h(ptr %p, i32 2048)
   ret <16 x i16> %res
 }
 
-declare <8 x i32> @llvm.loongarch.lasx.xvldrepl.w(i8*, i32)
+declare <8 x i32> @llvm.loongarch.lasx.xvldrepl.w(ptr, i32)
 
-define <8 x i32> @lasx_xvldrepl_w_lo(i8* %p) nounwind {
+define <8 x i32> @lasx_xvldrepl_w_lo(ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lasx.xvldrepl.w: argument out of range or not a multiple of 4.
 entry:
-  %res = call <8 x i32> @llvm.loongarch.lasx.xvldrepl.w(i8* %p, i32 -2052)
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvldrepl.w(ptr %p, i32 -2052)
   ret <8 x i32> %res
 }
 
-define <8 x i32> @lasx_xvldrepl_w_hi(i8* %p) nounwind {
+define <8 x i32> @lasx_xvldrepl_w_hi(ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lasx.xvldrepl.w: argument out of range or not a multiple of 4.
 entry:
-  %res = call <8 x i32> @llvm.loongarch.lasx.xvldrepl.w(i8* %p, i32 2048)
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvldrepl.w(ptr %p, i32 2048)
   ret <8 x i32> %res
 }
 
-declare <4 x i64> @llvm.loongarch.lasx.xvldrepl.d(i8*, i32)
+declare <4 x i64> @llvm.loongarch.lasx.xvldrepl.d(ptr, i32)
 
-define <4 x i64> @lasx_xvldrepl_d_lo(i8* %p) nounwind {
+define <4 x i64> @lasx_xvldrepl_d_lo(ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lasx.xvldrepl.d: argument out of range or not a multiple of 8.
 entry:
-  %res = call <4 x i64> @llvm.loongarch.lasx.xvldrepl.d(i8* %p, i32 -2056)
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvldrepl.d(ptr %p, i32 -2056)
   ret <4 x i64> %res
 }
 
-define <4 x i64> @lasx_xvldrepl_d_hi(i8* %p) nounwind {
+define <4 x i64> @lasx_xvldrepl_d_hi(ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lasx.xvldrepl.d: argument out of range or not a multiple of 8.
 entry:
-  %res = call <4 x i64> @llvm.loongarch.lasx.xvldrepl.d(i8* %p, i32 2048)
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvldrepl.d(ptr %p, i32 2048)
   ret <4 x i64> %res
 }

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ldrepl-non-imm.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ldrepl-non-imm.ll
index 075d663b0dd7a..74c22298db500 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ldrepl-non-imm.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ldrepl-non-imm.ll
@@ -1,37 +1,37 @@
 ; RUN: not llc --mtriple=loongarch64 --mattr=+lasx < %s 2>&1 | FileCheck %s
 
-declare <32 x i8> @llvm.loongarch.lasx.xvldrepl.b(i8*, i32)
+declare <32 x i8> @llvm.loongarch.lasx.xvldrepl.b(ptr, i32)
 
-define <32 x i8> @lasx_xvldrepl_b(i8* %p, i32 %a) nounwind {
+define <32 x i8> @lasx_xvldrepl_b(ptr %p, i32 %a) nounwind {
 ; CHECK: immarg operand has non-immediate parameter
 entry:
-  %res = call <32 x i8> @llvm.loongarch.lasx.xvldrepl.b(i8* %p, i32 %a)
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvldrepl.b(ptr %p, i32 %a)
   ret <32 x i8> %res
 }
 
-declare <16 x i16> @llvm.loongarch.lasx.xvldrepl.h(i8*, i32)
+declare <16 x i16> @llvm.loongarch.lasx.xvldrepl.h(ptr, i32)
 
-define <16 x i16> @lasx_xvldrepl_h(i8* %p, i32 %a) nounwind {
+define <16 x i16> @lasx_xvldrepl_h(ptr %p, i32 %a) nounwind {
 ; CHECK: immarg operand has non-immediate parameter
 entry:
-  %res = call <16 x i16> @llvm.loongarch.lasx.xvldrepl.h(i8* %p, i32 %a)
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvldrepl.h(ptr %p, i32 %a)
   ret <16 x i16> %res
 }
 
-declare <8 x i32> @llvm.loongarch.lasx.xvldrepl.w(i8*, i32)
+declare <8 x i32> @llvm.loongarch.lasx.xvldrepl.w(ptr, i32)
 
-define <8 x i32> @lasx_xvldrepl_w(i8* %p, i32 %a) nounwind {
+define <8 x i32> @lasx_xvldrepl_w(ptr %p, i32 %a) nounwind {
 ; CHECK: immarg operand has non-immediate parameter
 entry:
-  %res = call <8 x i32> @llvm.loongarch.lasx.xvldrepl.w(i8* %p, i32 %a)
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvldrepl.w(ptr %p, i32 %a)
   ret <8 x i32> %res
 }
 
-declare <4 x i64> @llvm.loongarch.lasx.xvldrepl.d(i8*, i32)
+declare <4 x i64> @llvm.loongarch.lasx.xvldrepl.d(ptr, i32)
 
-define <4 x i64> @lasx_xvldrepl_d(i8* %p, i32 %a) nounwind {
+define <4 x i64> @lasx_xvldrepl_d(ptr %p, i32 %a) nounwind {
 ; CHECK: immarg operand has non-immediate parameter
 entry:
-  %res = call <4 x i64> @llvm.loongarch.lasx.xvldrepl.d(i8* %p, i32 %a)
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvldrepl.d(ptr %p, i32 %a)
   ret <4 x i64> %res
 }

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ldrepl.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ldrepl.ll
index ae6abdf81cbc5..ccd969a9f2998 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ldrepl.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ldrepl.ll
@@ -1,50 +1,50 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
 
-declare <32 x i8> @llvm.loongarch.lasx.xvldrepl.b(i8*, i32)
+declare <32 x i8> @llvm.loongarch.lasx.xvldrepl.b(ptr, i32)
 
-define <32 x i8> @lasx_xvldrepl_b(i8* %p) nounwind {
+define <32 x i8> @lasx_xvldrepl_b(ptr %p) nounwind {
 ; CHECK-LABEL: lasx_xvldrepl_b:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xvldrepl.b $xr0, $a0, 1
 ; CHECK-NEXT:    ret
 entry:
-  %res = call <32 x i8> @llvm.loongarch.lasx.xvldrepl.b(i8* %p, i32 1)
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvldrepl.b(ptr %p, i32 1)
   ret <32 x i8> %res
 }
 
-declare <16 x i16> @llvm.loongarch.lasx.xvldrepl.h(i8*, i32)
+declare <16 x i16> @llvm.loongarch.lasx.xvldrepl.h(ptr, i32)
 
-define <16 x i16> @lasx_xvldrepl_h(i8* %p) nounwind {
+define <16 x i16> @lasx_xvldrepl_h(ptr %p) nounwind {
 ; CHECK-LABEL: lasx_xvldrepl_h:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xvldrepl.h $xr0, $a0, 2
 ; CHECK-NEXT:    ret
 entry:
-  %res = call <16 x i16> @llvm.loongarch.lasx.xvldrepl.h(i8* %p, i32 2)
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvldrepl.h(ptr %p, i32 2)
   ret <16 x i16> %res
 }
 
-declare <8 x i32> @llvm.loongarch.lasx.xvldrepl.w(i8*, i32)
+declare <8 x i32> @llvm.loongarch.lasx.xvldrepl.w(ptr, i32)
 
-define <8 x i32> @lasx_xvldrepl_w(i8* %p) nounwind {
+define <8 x i32> @lasx_xvldrepl_w(ptr %p) nounwind {
 ; CHECK-LABEL: lasx_xvldrepl_w:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xvldrepl.w $xr0, $a0, 4
 ; CHECK-NEXT:    ret
 entry:
-  %res = call <8 x i32> @llvm.loongarch.lasx.xvldrepl.w(i8* %p, i32 4)
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvldrepl.w(ptr %p, i32 4)
   ret <8 x i32> %res
 }
 
-declare <4 x i64> @llvm.loongarch.lasx.xvldrepl.d(i8*, i32)
+declare <4 x i64> @llvm.loongarch.lasx.xvldrepl.d(ptr, i32)
 
-define <4 x i64> @lasx_xvldrepl_d(i8* %p) nounwind {
+define <4 x i64> @lasx_xvldrepl_d(ptr %p) nounwind {
 ; CHECK-LABEL: lasx_xvldrepl_d:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xvldrepl.d $xr0, $a0, 8
 ; CHECK-NEXT:    ret
 entry:
-  %res = call <4 x i64> @llvm.loongarch.lasx.xvldrepl.d(i8* %p, i32 8)
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvldrepl.d(ptr %p, i32 8)
   ret <4 x i64> %res
 }

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-st-invalid-imm.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-st-invalid-imm.ll
index 0177f2b77b939..6108ae1883da5 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-st-invalid-imm.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-st-invalid-imm.ll
@@ -1,17 +1,17 @@
 ; RUN: not llc --mtriple=loongarch64 --mattr=+lasx < %s 2>&1 | FileCheck %s
 
-declare void @llvm.loongarch.lasx.xvst(<32 x i8>, i8*, i32)
+declare void @llvm.loongarch.lasx.xvst(<32 x i8>, ptr, i32)
 
-define void @lasx_xvst_lo(<32 x i8> %va, i8* %p) nounwind {
+define void @lasx_xvst_lo(<32 x i8> %va, ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lasx.xvst: argument out of range
 entry:
-  call void @llvm.loongarch.lasx.xvst(<32 x i8> %va, i8* %p, i32 -2049)
+  call void @llvm.loongarch.lasx.xvst(<32 x i8> %va, ptr %p, i32 -2049)
   ret void
 }
 
-define void @lasx_xvst_hi(<32 x i8> %va, i8* %p) nounwind {
+define void @lasx_xvst_hi(<32 x i8> %va, ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lasx.xvst: argument out of range
 entry:
-  call void @llvm.loongarch.lasx.xvst(<32 x i8> %va, i8* %p, i32 2048)
+  call void @llvm.loongarch.lasx.xvst(<32 x i8> %va, ptr %p, i32 2048)
   ret void
 }

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-st-non-imm.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-st-non-imm.ll
index c19207aad6b8c..969fb5765dd82 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-st-non-imm.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-st-non-imm.ll
@@ -1,10 +1,10 @@
 ; RUN: not llc --mtriple=loongarch64 --mattr=+lasx < %s 2>&1 | FileCheck %s
 
-declare void @llvm.loongarch.lasx.xvst(<32 x i8>, i8*, i32)
+declare void @llvm.loongarch.lasx.xvst(<32 x i8>, ptr, i32)
 
-define void @lasx_xvst(<32 x i8> %va, i8* %p, i32 %b) nounwind {
+define void @lasx_xvst(<32 x i8> %va, ptr %p, i32 %b) nounwind {
 ; CHECK: immarg operand has non-immediate parameter
 entry:
-  call void @llvm.loongarch.lasx.xvst(<32 x i8> %va, i8* %p, i32 %b)
+  call void @llvm.loongarch.lasx.xvst(<32 x i8> %va, ptr %p, i32 %b)
   ret void
 }

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-st.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-st.ll
index b69e7b813f0c1..060121df57c24 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-st.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-st.ll
@@ -1,27 +1,27 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
 
-declare void @llvm.loongarch.lasx.xvst(<32 x i8>, i8*, i32)
+declare void @llvm.loongarch.lasx.xvst(<32 x i8>, ptr, i32)
 
-define void @lasx_xvst(<32 x i8> %va, i8* %p) nounwind {
+define void @lasx_xvst(<32 x i8> %va, ptr %p) nounwind {
 ; CHECK-LABEL: lasx_xvst:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xvst $xr0, $a0, 1
 ; CHECK-NEXT:    ret
 entry:
-  call void @llvm.loongarch.lasx.xvst(<32 x i8> %va, i8* %p, i32 1)
+  call void @llvm.loongarch.lasx.xvst(<32 x i8> %va, ptr %p, i32 1)
   ret void
 }
 
-declare void @llvm.loongarch.lasx.xvstx(<32 x i8>, i8*, i64)
+declare void @llvm.loongarch.lasx.xvstx(<32 x i8>, ptr, i64)
 
-define void @lasx_xvstx(<32 x i8> %va, i8* %p) nounwind {
+define void @lasx_xvstx(<32 x i8> %va, ptr %p) nounwind {
 ; CHECK-LABEL: lasx_xvstx:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    ori $a1, $zero, 1
 ; CHECK-NEXT:    xvstx $xr0, $a0, $a1
 ; CHECK-NEXT:    ret
 entry:
-  call void @llvm.loongarch.lasx.xvstx(<32 x i8> %va, i8* %p, i64 1)
+  call void @llvm.loongarch.lasx.xvstx(<32 x i8> %va, ptr %p, i64 1)
   ret void
 }

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-stelm-invalid-imm.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-stelm-invalid-imm.ll
index 0ea2484e090df..4593de13fbff7 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-stelm-invalid-imm.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-stelm-invalid-imm.ll
@@ -1,121 +1,121 @@
 ; RUN: not llc --mtriple=loongarch64 --mattr=+lasx < %s 2>&1 | FileCheck %s
 
-declare void @llvm.loongarch.lasx.xvstelm.b(<32 x i8>, i8*, i32, i32)
+declare void @llvm.loongarch.lasx.xvstelm.b(<32 x i8>, ptr, i32, i32)
 
-define void @lasx_xvstelm_b_lo(<32 x i8> %va, i8* %p) nounwind {
+define void @lasx_xvstelm_b_lo(<32 x i8> %va, ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lasx.xvstelm.b: argument out of range
 entry:
-  call void @llvm.loongarch.lasx.xvstelm.b(<32 x i8> %va, i8* %p, i32 -129, i32 1)
+  call void @llvm.loongarch.lasx.xvstelm.b(<32 x i8> %va, ptr %p, i32 -129, i32 1)
   ret void
 }
 
-define void @lasx_xvstelm_b_hi(<32 x i8> %va, i8* %p) nounwind {
+define void @lasx_xvstelm_b_hi(<32 x i8> %va, ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lasx.xvstelm.b: argument out of range
 entry:
-  call void @llvm.loongarch.lasx.xvstelm.b(<32 x i8> %va, i8* %p, i32 128, i32 1)
+  call void @llvm.loongarch.lasx.xvstelm.b(<32 x i8> %va, ptr %p, i32 128, i32 1)
   ret void
 }
 
-define void @lasx_xvstelm_b_idx_lo(<32 x i8> %va, i8* %p) nounwind {
+define void @lasx_xvstelm_b_idx_lo(<32 x i8> %va, ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lasx.xvstelm.b: argument out of range
 entry:
-  call void @llvm.loongarch.lasx.xvstelm.b(<32 x i8> %va, i8* %p, i32 1, i32 -1)
+  call void @llvm.loongarch.lasx.xvstelm.b(<32 x i8> %va, ptr %p, i32 1, i32 -1)
   ret void
 }
 
-define void @lasx_xvstelm_b_idx_hi(<32 x i8> %va, i8* %p) nounwind {
+define void @lasx_xvstelm_b_idx_hi(<32 x i8> %va, ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lasx.xvstelm.b: argument out of range
 entry:
-  call void @llvm.loongarch.lasx.xvstelm.b(<32 x i8> %va, i8* %p, i32 1, i32 32)
+  call void @llvm.loongarch.lasx.xvstelm.b(<32 x i8> %va, ptr %p, i32 1, i32 32)
   ret void
 }
 
-declare void @llvm.loongarch.lasx.xvstelm.h(<16 x i16>, i8*, i32, i32)
+declare void @llvm.loongarch.lasx.xvstelm.h(<16 x i16>, ptr, i32, i32)
 
-define void @lasx_xvstelm_h_lo(<16 x i16> %va, i8* %p) nounwind {
+define void @lasx_xvstelm_h_lo(<16 x i16> %va, ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lasx.xvstelm.h: argument out of range or not a multiple of 2.
 entry:
-  call void @llvm.loongarch.lasx.xvstelm.h(<16 x i16> %va, i8* %p, i32 -258, i32 1)
+  call void @llvm.loongarch.lasx.xvstelm.h(<16 x i16> %va, ptr %p, i32 -258, i32 1)
   ret void
 }
 
-define void @lasx_xvstelm_h_hi(<16 x i16> %va, i8* %p) nounwind {
+define void @lasx_xvstelm_h_hi(<16 x i16> %va, ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lasx.xvstelm.h: argument out of range or not a multiple of 2.
 entry:
-  call void @llvm.loongarch.lasx.xvstelm.h(<16 x i16> %va, i8* %p, i32 256, i32 1)
+  call void @llvm.loongarch.lasx.xvstelm.h(<16 x i16> %va, ptr %p, i32 256, i32 1)
   ret void
 }
 
-define void @lasx_xvstelm_h_idx_lo(<16 x i16> %va, i8* %p) nounwind {
+define void @lasx_xvstelm_h_idx_lo(<16 x i16> %va, ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lasx.xvstelm.h: argument out of range or not a multiple of 2.
 entry:
-  call void @llvm.loongarch.lasx.xvstelm.h(<16 x i16> %va, i8* %p, i32 2, i32 -1)
+  call void @llvm.loongarch.lasx.xvstelm.h(<16 x i16> %va, ptr %p, i32 2, i32 -1)
   ret void
 }
 
-define void @lasx_xvstelm_h_idx_hi(<16 x i16> %va, i8* %p) nounwind {
+define void @lasx_xvstelm_h_idx_hi(<16 x i16> %va, ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lasx.xvstelm.h: argument out of range or not a multiple of 2.
 entry:
-  call void @llvm.loongarch.lasx.xvstelm.h(<16 x i16> %va, i8* %p, i32 2, i32 16)
+  call void @llvm.loongarch.lasx.xvstelm.h(<16 x i16> %va, ptr %p, i32 2, i32 16)
   ret void
 }
 
-declare void @llvm.loongarch.lasx.xvstelm.w(<8 x i32>, i8*, i32, i32)
+declare void @llvm.loongarch.lasx.xvstelm.w(<8 x i32>, ptr, i32, i32)
 
-define void @lasx_xvstelm_w_lo(<8 x i32> %va, i8* %p) nounwind {
+define void @lasx_xvstelm_w_lo(<8 x i32> %va, ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lasx.xvstelm.w: argument out of range or not a multiple of 4.
 entry:
-  call void @llvm.loongarch.lasx.xvstelm.w(<8 x i32> %va, i8* %p, i32 -516, i32 1)
+  call void @llvm.loongarch.lasx.xvstelm.w(<8 x i32> %va, ptr %p, i32 -516, i32 1)
   ret void
 }
 
-define void @lasx_xvstelm_w_hi(<8 x i32> %va, i8* %p) nounwind {
+define void @lasx_xvstelm_w_hi(<8 x i32> %va, ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lasx.xvstelm.w: argument out of range or not a multiple of 4.
 entry:
-  call void @llvm.loongarch.lasx.xvstelm.w(<8 x i32> %va, i8* %p, i32 512, i32 1)
+  call void @llvm.loongarch.lasx.xvstelm.w(<8 x i32> %va, ptr %p, i32 512, i32 1)
   ret void
 }
 
-define void @lasx_xvstelm_w_idx_lo(<8 x i32> %va, i8* %p) nounwind {
+define void @lasx_xvstelm_w_idx_lo(<8 x i32> %va, ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lasx.xvstelm.w: argument out of range or not a multiple of 4.
 entry:
-  call void @llvm.loongarch.lasx.xvstelm.w(<8 x i32> %va, i8* %p, i32 4, i32 -1)
+  call void @llvm.loongarch.lasx.xvstelm.w(<8 x i32> %va, ptr %p, i32 4, i32 -1)
   ret void
 }
 
-define void @lasx_xvstelm_w_idx_hi(<8 x i32> %va, i8* %p) nounwind {
+define void @lasx_xvstelm_w_idx_hi(<8 x i32> %va, ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lasx.xvstelm.w: argument out of range or not a multiple of 4.
 entry:
-  call void @llvm.loongarch.lasx.xvstelm.w(<8 x i32> %va, i8* %p, i32 4, i32 8)
+  call void @llvm.loongarch.lasx.xvstelm.w(<8 x i32> %va, ptr %p, i32 4, i32 8)
   ret void
 }
 
-declare void @llvm.loongarch.lasx.xvstelm.d(<4 x i64>, i8*, i32, i32)
+declare void @llvm.loongarch.lasx.xvstelm.d(<4 x i64>, ptr, i32, i32)
 
-define void @lasx_xvstelm_d_lo(<4 x i64> %va, i8* %p) nounwind {
+define void @lasx_xvstelm_d_lo(<4 x i64> %va, ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lasx.xvstelm.d: argument out of range or not a multiple of 8.
 entry:
-  call void @llvm.loongarch.lasx.xvstelm.d(<4 x i64> %va, i8* %p, i32 -1032, i32 1)
+  call void @llvm.loongarch.lasx.xvstelm.d(<4 x i64> %va, ptr %p, i32 -1032, i32 1)
   ret void
 }
 
-define void @lasx_xvstelm_d_hi(<4 x i64> %va, i8* %p) nounwind {
+define void @lasx_xvstelm_d_hi(<4 x i64> %va, ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lasx.xvstelm.d: argument out of range or not a multiple of 8.
 entry:
-  call void @llvm.loongarch.lasx.xvstelm.d(<4 x i64> %va, i8* %p, i32 1024, i32 1)
+  call void @llvm.loongarch.lasx.xvstelm.d(<4 x i64> %va, ptr %p, i32 1024, i32 1)
   ret void
 }
 
-define void @lasx_xvstelm_d_idx_lo(<4 x i64> %va, i8* %p) nounwind {
+define void @lasx_xvstelm_d_idx_lo(<4 x i64> %va, ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lasx.xvstelm.d: argument out of range or not a multiple of 8.
 entry:
-  call void @llvm.loongarch.lasx.xvstelm.d(<4 x i64> %va, i8* %p, i32 8, i32 -1)
+  call void @llvm.loongarch.lasx.xvstelm.d(<4 x i64> %va, ptr %p, i32 8, i32 -1)
   ret void
 }
 
-define void @lasx_xvstelm_d_idx_hi(<4 x i64> %va, i8* %p) nounwind {
+define void @lasx_xvstelm_d_idx_hi(<4 x i64> %va, ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lasx.xvstelm.d: argument out of range or not a multiple of 8.
 entry:
-  call void @llvm.loongarch.lasx.xvstelm.d(<4 x i64> %va, i8* %p, i32 8, i32 4)
+  call void @llvm.loongarch.lasx.xvstelm.d(<4 x i64> %va, ptr %p, i32 8, i32 4)
   ret void
 }

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-stelm-non-imm.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-stelm-non-imm.ll
index 42c7c0da17469..faa7d501eb743 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-stelm-non-imm.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-stelm-non-imm.ll
@@ -1,65 +1,65 @@
 ; RUN: not llc --mtriple=loongarch64 --mattr=+lasx < %s 2>&1 | FileCheck %s
 
-declare void @llvm.loongarch.lasx.xvstelm.b(<32 x i8>, i8*, i32, i32)
+declare void @llvm.loongarch.lasx.xvstelm.b(<32 x i8>, ptr, i32, i32)
 
-define void @lasx_xvstelm_b(<32 x i8> %va, i8* %p, i32 %b) nounwind {
+define void @lasx_xvstelm_b(<32 x i8> %va, ptr %p, i32 %b) nounwind {
 ; CHECK: immarg operand has non-immediate parameter
 entry:
-  call void @llvm.loongarch.lasx.xvstelm.b(<32 x i8> %va, i8* %p, i32 %b, i32 1)
+  call void @llvm.loongarch.lasx.xvstelm.b(<32 x i8> %va, ptr %p, i32 %b, i32 1)
   ret void
 }
 
-define void @lasx_xvstelm_b_idx(<32 x i8> %va, i8* %p, i32 %b) nounwind {
+define void @lasx_xvstelm_b_idx(<32 x i8> %va, ptr %p, i32 %b) nounwind {
 ; CHECK: immarg operand has non-immediate parameter
 entry:
-  call void @llvm.loongarch.lasx.xvstelm.b(<32 x i8> %va, i8* %p, i32 1, i32 %b)
+  call void @llvm.loongarch.lasx.xvstelm.b(<32 x i8> %va, ptr %p, i32 1, i32 %b)
   ret void
 }
 
-declare void @llvm.loongarch.lasx.xvstelm.h(<16 x i16>, i8*, i32, i32)
+declare void @llvm.loongarch.lasx.xvstelm.h(<16 x i16>, ptr, i32, i32)
 
-define void @lasx_xvstelm_h(<16 x i16> %va, i8* %p, i32 %b) nounwind {
+define void @lasx_xvstelm_h(<16 x i16> %va, ptr %p, i32 %b) nounwind {
 ; CHECK: immarg operand has non-immediate parameter
 entry:
-  call void @llvm.loongarch.lasx.xvstelm.h(<16 x i16> %va, i8* %p, i32 %b, i32 1)
+  call void @llvm.loongarch.lasx.xvstelm.h(<16 x i16> %va, ptr %p, i32 %b, i32 1)
   ret void
 }
 
-define void @lasx_xvstelm_h_idx(<16 x i16> %va, i8* %p, i32 %b) nounwind {
+define void @lasx_xvstelm_h_idx(<16 x i16> %va, ptr %p, i32 %b) nounwind {
 ; CHECK: immarg operand has non-immediate parameter
 entry:
-  call void @llvm.loongarch.lasx.xvstelm.h(<16 x i16> %va, i8* %p, i32 2, i32 %b)
+  call void @llvm.loongarch.lasx.xvstelm.h(<16 x i16> %va, ptr %p, i32 2, i32 %b)
   ret void
 }
 
-declare void @llvm.loongarch.lasx.xvstelm.w(<8 x i32>, i8*, i32, i32)
+declare void @llvm.loongarch.lasx.xvstelm.w(<8 x i32>, ptr, i32, i32)
 
-define void @lasx_xvstelm_w(<8 x i32> %va, i8* %p, i32 %b) nounwind {
+define void @lasx_xvstelm_w(<8 x i32> %va, ptr %p, i32 %b) nounwind {
 ; CHECK: immarg operand has non-immediate parameter
 entry:
-  call void @llvm.loongarch.lasx.xvstelm.w(<8 x i32> %va, i8* %p, i32 %b, i32 1)
+  call void @llvm.loongarch.lasx.xvstelm.w(<8 x i32> %va, ptr %p, i32 %b, i32 1)
   ret void
 }
 
-define void @lasx_xvstelm_w_idx(<8 x i32> %va, i8* %p, i32 %b) nounwind {
+define void @lasx_xvstelm_w_idx(<8 x i32> %va, ptr %p, i32 %b) nounwind {
 ; CHECK: immarg operand has non-immediate parameter
 entry:
-  call void @llvm.loongarch.lasx.xvstelm.w(<8 x i32> %va, i8* %p, i32 4, i32 %b)
+  call void @llvm.loongarch.lasx.xvstelm.w(<8 x i32> %va, ptr %p, i32 4, i32 %b)
   ret void
 }
 
-declare void @llvm.loongarch.lasx.xvstelm.d(<4 x i64>, i8*, i32, i32)
+declare void @llvm.loongarch.lasx.xvstelm.d(<4 x i64>, ptr, i32, i32)
 
-define void @lasx_xvstelm_d(<4 x i64> %va, i8* %p, i32 %b) nounwind {
+define void @lasx_xvstelm_d(<4 x i64> %va, ptr %p, i32 %b) nounwind {
 ; CHECK: immarg operand has non-immediate parameter
 entry:
-  call void @llvm.loongarch.lasx.xvstelm.d(<4 x i64> %va, i8* %p, i32 %b, i32 1)
+  call void @llvm.loongarch.lasx.xvstelm.d(<4 x i64> %va, ptr %p, i32 %b, i32 1)
   ret void
 }
 
-define void @lasx_xvstelm_d_idx(<4 x i64> %va, i8* %p, i32 %b) nounwind {
+define void @lasx_xvstelm_d_idx(<4 x i64> %va, ptr %p, i32 %b) nounwind {
 ; CHECK: immarg operand has non-immediate parameter
 entry:
-  call void @llvm.loongarch.lasx.xvstelm.d(<4 x i64> %va, i8* %p, i32 8, i32 %b)
+  call void @llvm.loongarch.lasx.xvstelm.d(<4 x i64> %va, ptr %p, i32 8, i32 %b)
   ret void
 }

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-stelm.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-stelm.ll
index 52ef3c4714127..34d1866e9d5ed 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-stelm.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-stelm.ll
@@ -1,50 +1,50 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
 
-declare void @llvm.loongarch.lasx.xvstelm.b(<32 x i8>, i8*, i32, i32)
+declare void @llvm.loongarch.lasx.xvstelm.b(<32 x i8>, ptr, i32, i32)
 
-define void @lasx_xvstelm_b(<32 x i8> %va, i8* %p) nounwind {
+define void @lasx_xvstelm_b(<32 x i8> %va, ptr %p) nounwind {
 ; CHECK-LABEL: lasx_xvstelm_b:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xvstelm.b $xr0, $a0, 1, 1
 ; CHECK-NEXT:    ret
 entry:
-  call void @llvm.loongarch.lasx.xvstelm.b(<32 x i8> %va, i8* %p, i32 1, i32 1)
+  call void @llvm.loongarch.lasx.xvstelm.b(<32 x i8> %va, ptr %p, i32 1, i32 1)
   ret void
 }
 
-declare void @llvm.loongarch.lasx.xvstelm.h(<16 x i16>, i8*, i32, i32)
+declare void @llvm.loongarch.lasx.xvstelm.h(<16 x i16>, ptr, i32, i32)
 
-define void @lasx_xvstelm_h(<16 x i16> %va, i8* %p) nounwind {
+define void @lasx_xvstelm_h(<16 x i16> %va, ptr %p) nounwind {
 ; CHECK-LABEL: lasx_xvstelm_h:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xvstelm.h $xr0, $a0, 2, 1
 ; CHECK-NEXT:    ret
 entry:
-  call void @llvm.loongarch.lasx.xvstelm.h(<16 x i16> %va, i8* %p, i32 2, i32 1)
+  call void @llvm.loongarch.lasx.xvstelm.h(<16 x i16> %va, ptr %p, i32 2, i32 1)
   ret void
 }
 
-declare void @llvm.loongarch.lasx.xvstelm.w(<8 x i32>, i8*, i32, i32)
+declare void @llvm.loongarch.lasx.xvstelm.w(<8 x i32>, ptr, i32, i32)
 
-define void @lasx_xvstelm_w(<8 x i32> %va, i8* %p) nounwind {
+define void @lasx_xvstelm_w(<8 x i32> %va, ptr %p) nounwind {
 ; CHECK-LABEL: lasx_xvstelm_w:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xvstelm.w $xr0, $a0, 4, 1
 ; CHECK-NEXT:    ret
 entry:
-  call void @llvm.loongarch.lasx.xvstelm.w(<8 x i32> %va, i8* %p, i32 4, i32 1)
+  call void @llvm.loongarch.lasx.xvstelm.w(<8 x i32> %va, ptr %p, i32 4, i32 1)
   ret void
 }
 
-declare void @llvm.loongarch.lasx.xvstelm.d(<4 x i64>, i8*, i32, i32)
+declare void @llvm.loongarch.lasx.xvstelm.d(<4 x i64>, ptr, i32, i32)
 
-define void @lasx_xvstelm_d(<4 x i64> %va, i8* %p) nounwind {
+define void @lasx_xvstelm_d(<4 x i64> %va, ptr %p) nounwind {
 ; CHECK-LABEL: lasx_xvstelm_d:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xvstelm.d $xr0, $a0, 8, 1
 ; CHECK-NEXT:    ret
 entry:
-  call void @llvm.loongarch.lasx.xvstelm.d(<4 x i64> %va, i8* %p, i32 8, i32 1)
+  call void @llvm.loongarch.lasx.xvstelm.d(<4 x i64> %va, ptr %p, i32 8, i32 1)
   ret void
 }

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ld-invalid-imm.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ld-invalid-imm.ll
index 3aeb30ce66b44..9375f9f01a92e 100644
--- a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ld-invalid-imm.ll
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ld-invalid-imm.ll
@@ -1,17 +1,17 @@
 ; RUN: not llc --mtriple=loongarch64 --mattr=+lsx < %s 2>&1 | FileCheck %s
 
-declare <16 x i8> @llvm.loongarch.lsx.vld(i8*, i32)
+declare <16 x i8> @llvm.loongarch.lsx.vld(ptr, i32)
 
-define <16 x i8> @lsx_vld_lo(i8* %p) nounwind {
+define <16 x i8> @lsx_vld_lo(ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lsx.vld: argument out of range
 entry:
-  %res = call <16 x i8> @llvm.loongarch.lsx.vld(i8* %p, i32 -2049)
+  %res = call <16 x i8> @llvm.loongarch.lsx.vld(ptr %p, i32 -2049)
   ret <16 x i8> %res
 }
 
-define <16 x i8> @lsx_vld_hi(i8* %p) nounwind {
+define <16 x i8> @lsx_vld_hi(ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lsx.vld: argument out of range
 entry:
-  %res = call <16 x i8> @llvm.loongarch.lsx.vld(i8* %p, i32 2048)
+  %res = call <16 x i8> @llvm.loongarch.lsx.vld(ptr %p, i32 2048)
   ret <16 x i8> %res
 }

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ld-non-imm.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ld-non-imm.ll
index db6a0318d87ae..f8b4c42326df0 100644
--- a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ld-non-imm.ll
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ld-non-imm.ll
@@ -1,10 +1,10 @@
 ; RUN: not llc --mtriple=loongarch64 --mattr=+lsx < %s 2>&1 | FileCheck %s
 
-declare <16 x i8> @llvm.loongarch.lsx.vld(i8*, i32)
+declare <16 x i8> @llvm.loongarch.lsx.vld(ptr, i32)
 
-define <16 x i8> @lsx_vld(i8* %p, i32 %a) nounwind {
+define <16 x i8> @lsx_vld(ptr %p, i32 %a) nounwind {
 ; CHECK: immarg operand has non-immediate parameter
 entry:
-  %res = call <16 x i8> @llvm.loongarch.lsx.vld(i8* %p, i32 %a)
+  %res = call <16 x i8> @llvm.loongarch.lsx.vld(ptr %p, i32 %a)
   ret <16 x i8> %res
 }

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ld.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ld.ll
index b9e2ff8088d83..b9d38b326ffaf 100644
--- a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ld.ll
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ld.ll
@@ -1,26 +1,26 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
 
-declare <16 x i8> @llvm.loongarch.lsx.vld(i8*, i32)
+declare <16 x i8> @llvm.loongarch.lsx.vld(ptr, i32)
 
-define <16 x i8> @lsx_vld(i8* %p) nounwind {
+define <16 x i8> @lsx_vld(ptr %p) nounwind {
 ; CHECK-LABEL: lsx_vld:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vld $vr0, $a0, 1
 ; CHECK-NEXT:    ret
 entry:
-  %res = call <16 x i8> @llvm.loongarch.lsx.vld(i8* %p, i32 1)
+  %res = call <16 x i8> @llvm.loongarch.lsx.vld(ptr %p, i32 1)
   ret <16 x i8> %res
 }
 
-declare <16 x i8> @llvm.loongarch.lsx.vldx(i8*, i64)
+declare <16 x i8> @llvm.loongarch.lsx.vldx(ptr, i64)
 
-define <16 x i8> @lsx_vldx(i8* %p, i64 %b) nounwind {
+define <16 x i8> @lsx_vldx(ptr %p, i64 %b) nounwind {
 ; CHECK-LABEL: lsx_vldx:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vldx $vr0, $a0, $a1
 ; CHECK-NEXT:    ret
 entry:
-  %res = call <16 x i8> @llvm.loongarch.lsx.vldx(i8* %p, i64 %b)
+  %res = call <16 x i8> @llvm.loongarch.lsx.vldx(ptr %p, i64 %b)
   ret <16 x i8> %res
 }

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ldrepl-invalid-imm.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ldrepl-invalid-imm.ll
index cb640e1245daa..34bf945c9df46 100644
--- a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ldrepl-invalid-imm.ll
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ldrepl-invalid-imm.ll
@@ -1,65 +1,65 @@
 ; RUN: not llc --mtriple=loongarch64 --mattr=+lsx < %s 2>&1 | FileCheck %s
 
-declare <16 x i8> @llvm.loongarch.lsx.vldrepl.b(i8*, i32)
+declare <16 x i8> @llvm.loongarch.lsx.vldrepl.b(ptr, i32)
 
-define <16 x i8> @lsx_vldrepl_b_lo(i8* %p) nounwind {
+define <16 x i8> @lsx_vldrepl_b_lo(ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lsx.vldrepl.b: argument out of range
 entry:
-  %res = call <16 x i8> @llvm.loongarch.lsx.vldrepl.b(i8* %p, i32 -2049)
+  %res = call <16 x i8> @llvm.loongarch.lsx.vldrepl.b(ptr %p, i32 -2049)
   ret <16 x i8> %res
 }
 
-define <16 x i8> @lsx_vldrepl_b_hi(i8* %p) nounwind {
+define <16 x i8> @lsx_vldrepl_b_hi(ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lsx.vldrepl.b: argument out of range
 entry:
-  %res = call <16 x i8> @llvm.loongarch.lsx.vldrepl.b(i8* %p, i32 2048)
+  %res = call <16 x i8> @llvm.loongarch.lsx.vldrepl.b(ptr %p, i32 2048)
   ret <16 x i8> %res
 }
 
-declare <8 x i16> @llvm.loongarch.lsx.vldrepl.h(i8*, i32)
+declare <8 x i16> @llvm.loongarch.lsx.vldrepl.h(ptr, i32)
 
-define <8 x i16> @lsx_vldrepl_h_lo(i8* %p) nounwind {
+define <8 x i16> @lsx_vldrepl_h_lo(ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lsx.vldrepl.h: argument out of range or not a multiple of 2.
 entry:
-  %res = call <8 x i16> @llvm.loongarch.lsx.vldrepl.h(i8* %p, i32 -2050)
+  %res = call <8 x i16> @llvm.loongarch.lsx.vldrepl.h(ptr %p, i32 -2050)
   ret <8 x i16> %res
 }
 
-define <8 x i16> @lsx_vldrepl_h_hi(i8* %p) nounwind {
+define <8 x i16> @lsx_vldrepl_h_hi(ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lsx.vldrepl.h: argument out of range or not a multiple of 2.
 entry:
-  %res = call <8 x i16> @llvm.loongarch.lsx.vldrepl.h(i8* %p, i32 2048)
+  %res = call <8 x i16> @llvm.loongarch.lsx.vldrepl.h(ptr %p, i32 2048)
   ret <8 x i16> %res
 }
 
-declare <4 x i32> @llvm.loongarch.lsx.vldrepl.w(i8*, i32)
+declare <4 x i32> @llvm.loongarch.lsx.vldrepl.w(ptr, i32)
 
-define <4 x i32> @lsx_vldrepl_w_lo(i8* %p) nounwind {
+define <4 x i32> @lsx_vldrepl_w_lo(ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lsx.vldrepl.w: argument out of range or not a multiple of 4.
 entry:
-  %res = call <4 x i32> @llvm.loongarch.lsx.vldrepl.w(i8* %p, i32 -2052)
+  %res = call <4 x i32> @llvm.loongarch.lsx.vldrepl.w(ptr %p, i32 -2052)
   ret <4 x i32> %res
 }
 
-define <4 x i32> @lsx_vldrepl_w_hi(i8* %p) nounwind {
+define <4 x i32> @lsx_vldrepl_w_hi(ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lsx.vldrepl.w: argument out of range or not a multiple of 4.
 entry:
-  %res = call <4 x i32> @llvm.loongarch.lsx.vldrepl.w(i8* %p, i32 2048)
+  %res = call <4 x i32> @llvm.loongarch.lsx.vldrepl.w(ptr %p, i32 2048)
   ret <4 x i32> %res
 }
 
-declare <2 x i64> @llvm.loongarch.lsx.vldrepl.d(i8*, i32)
+declare <2 x i64> @llvm.loongarch.lsx.vldrepl.d(ptr, i32)
 
-define <2 x i64> @lsx_vldrepl_d_lo(i8* %p) nounwind {
+define <2 x i64> @lsx_vldrepl_d_lo(ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lsx.vldrepl.d: argument out of range or not a multiple of 8.
 entry:
-  %res = call <2 x i64> @llvm.loongarch.lsx.vldrepl.d(i8* %p, i32 -2056)
+  %res = call <2 x i64> @llvm.loongarch.lsx.vldrepl.d(ptr %p, i32 -2056)
   ret <2 x i64> %res
 }
 
-define <2 x i64> @lsx_vldrepl_d_hi(i8* %p) nounwind {
+define <2 x i64> @lsx_vldrepl_d_hi(ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lsx.vldrepl.d: argument out of range or not a multiple of 8.
 entry:
-  %res = call <2 x i64> @llvm.loongarch.lsx.vldrepl.d(i8* %p, i32 2048)
+  %res = call <2 x i64> @llvm.loongarch.lsx.vldrepl.d(ptr %p, i32 2048)
   ret <2 x i64> %res
 }

diff  --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ldrepl-non-imm.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ldrepl-non-imm.ll
index e60b21913c699..9613c1a62540c 100644
--- a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ldrepl-non-imm.ll
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ldrepl-non-imm.ll
@@ -1,37 +1,37 @@
 ; RUN: not llc --mtriple=loongarch64 --mattr=+lsx < %s 2>&1 | FileCheck %s
 
-declare <16 x i8> @llvm.loongarch.lsx.vldrepl.b(i8*, i32)
+declare <16 x i8> @llvm.loongarch.lsx.vldrepl.b(ptr, i32)
 
-define <16 x i8> @lsx_vldrepl_b(i8* %p, i32 %a) nounwind {
+define <16 x i8> @lsx_vldrepl_b(ptr %p, i32 %a) nounwind {
 ; CHECK: immarg operand has non-immediate parameter
 entry:
-  %res = call <16 x i8> @llvm.loongarch.lsx.vldrepl.b(i8* %p, i32 %a)
+  %res = call <16 x i8> @llvm.loongarch.lsx.vldrepl.b(ptr %p, i32 %a)
   ret <16 x i8> %res
 }
 
-declare <8 x i16> @llvm.loongarch.lsx.vldrepl.h(i8*, i32)
+declare <8 x i16> @llvm.loongarch.lsx.vldrepl.h(ptr, i32)
 
-define <8 x i16> @lsx_vldrepl_h(i8* %p, i32 %a) nounwind {
+define <8 x i16> @lsx_vldrepl_h(ptr %p, i32 %a) nounwind {
 ; CHECK: immarg operand has non-immediate parameter
 entry:
-  %res = call <8 x i16> @llvm.loongarch.lsx.vldrepl.h(i8* %p, i32 %a)
+  %res = call <8 x i16> @llvm.loongarch.lsx.vldrepl.h(ptr %p, i32 %a)
   ret <8 x i16> %res
 }
 
-declare <4 x i32> @llvm.loongarch.lsx.vldrepl.w(i8*, i32)
+declare <4 x i32> @llvm.loongarch.lsx.vldrepl.w(ptr, i32)
 
-define <4 x i32> @lsx_vldrepl_w(i8* %p, i32 %a) nounwind {
+define <4 x i32> @lsx_vldrepl_w(ptr %p, i32 %a) nounwind {
 ; CHECK: immarg operand has non-immediate parameter
 entry:
-  %res = call <4 x i32> @llvm.loongarch.lsx.vldrepl.w(i8* %p, i32 %a)
+  %res = call <4 x i32> @llvm.loongarch.lsx.vldrepl.w(ptr %p, i32 %a)
   ret <4 x i32> %res
 }
 
-declare <2 x i64> @llvm.loongarch.lsx.vldrepl.d(i8*, i32)
+declare <2 x i64> @llvm.loongarch.lsx.vldrepl.d(ptr, i32)
 
-define <2 x i64> @lsx_vldrepl_d(i8* %p, i32 %a) nounwind {
+define <2 x i64> @lsx_vldrepl_d(ptr %p, i32 %a) nounwind {
 ; CHECK: immarg operand has non-immediate parameter
 entry:
-  %res = call <2 x i64> @llvm.loongarch.lsx.vldrepl.d(i8* %p, i32 %a)
+  %res = call <2 x i64> @llvm.loongarch.lsx.vldrepl.d(ptr %p, i32 %a)
   ret <2 x i64> %res
 }

diff  --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ldrepl.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ldrepl.ll
index 1a9cf3d3a7665..9ebe0c2fccd57 100644
--- a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ldrepl.ll
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ldrepl.ll
@@ -1,50 +1,50 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
 
-declare <16 x i8> @llvm.loongarch.lsx.vldrepl.b(i8*, i32)
+declare <16 x i8> @llvm.loongarch.lsx.vldrepl.b(ptr, i32)
 
-define <16 x i8> @lsx_vldrepl_b(i8* %p, i32 %b) nounwind {
+define <16 x i8> @lsx_vldrepl_b(ptr %p, i32 %b) nounwind {
 ; CHECK-LABEL: lsx_vldrepl_b:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vldrepl.b $vr0, $a0, 1
 ; CHECK-NEXT:    ret
 entry:
-  %res = call <16 x i8> @llvm.loongarch.lsx.vldrepl.b(i8* %p, i32 1)
+  %res = call <16 x i8> @llvm.loongarch.lsx.vldrepl.b(ptr %p, i32 1)
   ret <16 x i8> %res
 }
 
-declare <8 x i16> @llvm.loongarch.lsx.vldrepl.h(i8*, i32)
+declare <8 x i16> @llvm.loongarch.lsx.vldrepl.h(ptr, i32)
 
-define <8 x i16> @lsx_vldrepl_h(i8* %p, i32 %b) nounwind {
+define <8 x i16> @lsx_vldrepl_h(ptr %p, i32 %b) nounwind {
 ; CHECK-LABEL: lsx_vldrepl_h:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vldrepl.h $vr0, $a0, 2
 ; CHECK-NEXT:    ret
 entry:
-  %res = call <8 x i16> @llvm.loongarch.lsx.vldrepl.h(i8* %p, i32 2)
+  %res = call <8 x i16> @llvm.loongarch.lsx.vldrepl.h(ptr %p, i32 2)
   ret <8 x i16> %res
 }
 
-declare <4 x i32> @llvm.loongarch.lsx.vldrepl.w(i8*, i32)
+declare <4 x i32> @llvm.loongarch.lsx.vldrepl.w(ptr, i32)
 
-define <4 x i32> @lsx_vldrepl_w(i8* %p, i32 %b) nounwind {
+define <4 x i32> @lsx_vldrepl_w(ptr %p, i32 %b) nounwind {
 ; CHECK-LABEL: lsx_vldrepl_w:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vldrepl.w $vr0, $a0, 4
 ; CHECK-NEXT:    ret
 entry:
-  %res = call <4 x i32> @llvm.loongarch.lsx.vldrepl.w(i8* %p, i32 4)
+  %res = call <4 x i32> @llvm.loongarch.lsx.vldrepl.w(ptr %p, i32 4)
   ret <4 x i32> %res
 }
 
-declare <2 x i64> @llvm.loongarch.lsx.vldrepl.d(i8*, i32)
+declare <2 x i64> @llvm.loongarch.lsx.vldrepl.d(ptr, i32)
 
-define <2 x i64> @lsx_vldrepl_d(i8* %p, i32 %b) nounwind {
+define <2 x i64> @lsx_vldrepl_d(ptr %p, i32 %b) nounwind {
 ; CHECK-LABEL: lsx_vldrepl_d:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vldrepl.d $vr0, $a0, 8
 ; CHECK-NEXT:    ret
 entry:
-  %res = call <2 x i64> @llvm.loongarch.lsx.vldrepl.d(i8* %p, i32 8)
+  %res = call <2 x i64> @llvm.loongarch.lsx.vldrepl.d(ptr %p, i32 8)
   ret <2 x i64> %res
 }

diff  --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-st-invalid-imm.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-st-invalid-imm.ll
index 64518380964b4..a72126cd15a66 100644
--- a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-st-invalid-imm.ll
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-st-invalid-imm.ll
@@ -1,17 +1,17 @@
 ; RUN: not llc --mtriple=loongarch64 --mattr=+lsx < %s 2>&1 | FileCheck %s
 
-declare void @llvm.loongarch.lsx.vst(<16 x i8>, i8*, i32)
+declare void @llvm.loongarch.lsx.vst(<16 x i8>, ptr, i32)
 
-define void @lsx_vst_lo(<16 x i8> %va, i8* %p) nounwind {
+define void @lsx_vst_lo(<16 x i8> %va, ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lsx.vst: argument out of range
 entry:
-  call void @llvm.loongarch.lsx.vst(<16 x i8> %va, i8* %p, i32 -2049)
+  call void @llvm.loongarch.lsx.vst(<16 x i8> %va, ptr %p, i32 -2049)
   ret void
 }
 
-define void @lsx_vst_hi(<16 x i8> %va, i8* %p) nounwind {
+define void @lsx_vst_hi(<16 x i8> %va, ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lsx.vst: argument out of range
 entry:
-  call void @llvm.loongarch.lsx.vst(<16 x i8> %va, i8* %p, i32 2048)
+  call void @llvm.loongarch.lsx.vst(<16 x i8> %va, ptr %p, i32 2048)
   ret void
 }

diff  --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-st-non-imm.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-st-non-imm.ll
index 119ed9b786586..ba9f44c59c37d 100644
--- a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-st-non-imm.ll
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-st-non-imm.ll
@@ -1,10 +1,10 @@
 ; RUN: not llc --mtriple=loongarch64 --mattr=+lsx < %s 2>&1 | FileCheck %s
 
-declare void @llvm.loongarch.lsx.vst(<16 x i8>, i8*, i32)
+declare void @llvm.loongarch.lsx.vst(<16 x i8>, ptr, i32)
 
-define void @lsx_vst(<16 x i8> %va, i8* %p, i32 %b) nounwind {
+define void @lsx_vst(<16 x i8> %va, ptr %p, i32 %b) nounwind {
 ; CHECK: immarg operand has non-immediate parameter
 entry:
-  call void @llvm.loongarch.lsx.vst(<16 x i8> %va, i8* %p, i32 %b)
+  call void @llvm.loongarch.lsx.vst(<16 x i8> %va, ptr %p, i32 %b)
   ret void
 }

diff  --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-st.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-st.ll
index 798f509f2318e..b95bfe9b40d81 100644
--- a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-st.ll
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-st.ll
@@ -1,26 +1,26 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
 
-declare void @llvm.loongarch.lsx.vst(<16 x i8>, i8*, i32)
+declare void @llvm.loongarch.lsx.vst(<16 x i8>, ptr, i32)
 
-define void @lsx_vst(<16 x i8> %va, i8* %p) nounwind {
+define void @lsx_vst(<16 x i8> %va, ptr %p) nounwind {
 ; CHECK-LABEL: lsx_vst:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vst $vr0, $a0, -2048
 ; CHECK-NEXT:    ret
 entry:
-  call void @llvm.loongarch.lsx.vst(<16 x i8> %va, i8* %p, i32 -2048)
+  call void @llvm.loongarch.lsx.vst(<16 x i8> %va, ptr %p, i32 -2048)
   ret void
 }
 
-declare void @llvm.loongarch.lsx.vstx(<16 x i8>, i8*, i64)
+declare void @llvm.loongarch.lsx.vstx(<16 x i8>, ptr, i64)
 
-define void @lsx_vstx(<16 x i8> %va, i8* %p, i64 %c) nounwind {
+define void @lsx_vstx(<16 x i8> %va, ptr %p, i64 %c) nounwind {
 ; CHECK-LABEL: lsx_vstx:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vstx $vr0, $a0, $a1
 ; CHECK-NEXT:    ret
 entry:
-  call void @llvm.loongarch.lsx.vstx(<16 x i8> %va, i8* %p, i64 %c)
+  call void @llvm.loongarch.lsx.vstx(<16 x i8> %va, ptr %p, i64 %c)
   ret void
 }

diff  --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-stelm-invalid-imm.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-stelm-invalid-imm.ll
index 277abcbd34ccb..82dba30ed1e7d 100644
--- a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-stelm-invalid-imm.ll
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-stelm-invalid-imm.ll
@@ -1,121 +1,121 @@
 ; RUN: not llc --mtriple=loongarch64 --mattr=+lsx < %s 2>&1 | FileCheck %s
 
-declare void @llvm.loongarch.lsx.vstelm.b(<16 x i8>, i8*, i32, i32)
+declare void @llvm.loongarch.lsx.vstelm.b(<16 x i8>, ptr, i32, i32)
 
-define void @lsx_vstelm_b_lo(<16 x i8> %va, i8* %p) nounwind {
+define void @lsx_vstelm_b_lo(<16 x i8> %va, ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lsx.vstelm.b: argument out of range
 entry:
-  call void @llvm.loongarch.lsx.vstelm.b(<16 x i8> %va, i8* %p, i32 -129, i32 15)
+  call void @llvm.loongarch.lsx.vstelm.b(<16 x i8> %va, ptr %p, i32 -129, i32 15)
   ret void
 }
 
-define void @lsx_vstelm_b_hi(<16 x i8> %va, i8* %p) nounwind {
+define void @lsx_vstelm_b_hi(<16 x i8> %va, ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lsx.vstelm.b: argument out of range
 entry:
-  call void @llvm.loongarch.lsx.vstelm.b(<16 x i8> %va, i8* %p, i32 128, i32 15)
+  call void @llvm.loongarch.lsx.vstelm.b(<16 x i8> %va, ptr %p, i32 128, i32 15)
   ret void
 }
 
-define void @lsx_vstelm_b_idx_lo(<16 x i8> %va, i8* %p) nounwind {
+define void @lsx_vstelm_b_idx_lo(<16 x i8> %va, ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lsx.vstelm.b: argument out of range
 entry:
-  call void @llvm.loongarch.lsx.vstelm.b(<16 x i8> %va, i8* %p, i32 1, i32 -1)
+  call void @llvm.loongarch.lsx.vstelm.b(<16 x i8> %va, ptr %p, i32 1, i32 -1)
   ret void
 }
 
-define void @lsx_vstelm_b_idx_hi(<16 x i8> %va, i8* %p) nounwind {
+define void @lsx_vstelm_b_idx_hi(<16 x i8> %va, ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lsx.vstelm.b: argument out of range
 entry:
-  call void @llvm.loongarch.lsx.vstelm.b(<16 x i8> %va, i8* %p, i32 1, i32 16)
+  call void @llvm.loongarch.lsx.vstelm.b(<16 x i8> %va, ptr %p, i32 1, i32 16)
   ret void
 }
 
-declare void @llvm.loongarch.lsx.vstelm.h(<8 x i16>, i8*, i32, i32)
+declare void @llvm.loongarch.lsx.vstelm.h(<8 x i16>, ptr, i32, i32)
 
-define void @lsx_vstelm_h_lo(<8 x i16> %va, i8* %p) nounwind {
+define void @lsx_vstelm_h_lo(<8 x i16> %va, ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lsx.vstelm.h: argument out of range or not a multiple of 2.
 entry:
-  call void @llvm.loongarch.lsx.vstelm.h(<8 x i16> %va, i8* %p, i32 -258, i32 7)
+  call void @llvm.loongarch.lsx.vstelm.h(<8 x i16> %va, ptr %p, i32 -258, i32 7)
   ret void
 }
 
-define void @lsx_vstelm_h_hi(<8 x i16> %va, i8* %p) nounwind {
+define void @lsx_vstelm_h_hi(<8 x i16> %va, ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lsx.vstelm.h: argument out of range or not a multiple of 2.
 entry:
-  call void @llvm.loongarch.lsx.vstelm.h(<8 x i16> %va, i8* %p, i32 256, i32 7)
+  call void @llvm.loongarch.lsx.vstelm.h(<8 x i16> %va, ptr %p, i32 256, i32 7)
   ret void
 }
 
-define void @lsx_vstelm_h_idx_lo(<8 x i16> %va, i8* %p) nounwind {
+define void @lsx_vstelm_h_idx_lo(<8 x i16> %va, ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lsx.vstelm.h: argument out of range or not a multiple of 2.
 entry:
-  call void @llvm.loongarch.lsx.vstelm.h(<8 x i16> %va, i8* %p, i32 2, i32 -1)
+  call void @llvm.loongarch.lsx.vstelm.h(<8 x i16> %va, ptr %p, i32 2, i32 -1)
   ret void
 }
 
-define void @lsx_vstelm_h_idx_hi(<8 x i16> %va, i8* %p) nounwind {
+define void @lsx_vstelm_h_idx_hi(<8 x i16> %va, ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lsx.vstelm.h: argument out of range or not a multiple of 2.
 entry:
-  call void @llvm.loongarch.lsx.vstelm.h(<8 x i16> %va, i8* %p, i32 2, i32 8)
+  call void @llvm.loongarch.lsx.vstelm.h(<8 x i16> %va, ptr %p, i32 2, i32 8)
   ret void
 }
 
-declare void @llvm.loongarch.lsx.vstelm.w(<4 x i32>, i8*, i32, i32)
+declare void @llvm.loongarch.lsx.vstelm.w(<4 x i32>, ptr, i32, i32)
 
-define void @lsx_vstelm_w_lo(<4 x i32> %va, i8* %p) nounwind {
+define void @lsx_vstelm_w_lo(<4 x i32> %va, ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lsx.vstelm.w: argument out of range or not a multiple of 4.
 entry:
-  call void @llvm.loongarch.lsx.vstelm.w(<4 x i32> %va, i8* %p, i32 -516, i32 3)
+  call void @llvm.loongarch.lsx.vstelm.w(<4 x i32> %va, ptr %p, i32 -516, i32 3)
   ret void
 }
 
-define void @lsx_vstelm_w_hi(<4 x i32> %va, i8* %p) nounwind {
+define void @lsx_vstelm_w_hi(<4 x i32> %va, ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lsx.vstelm.w: argument out of range or not a multiple of 4.
 entry:
-  call void @llvm.loongarch.lsx.vstelm.w(<4 x i32> %va, i8* %p, i32 512, i32 3)
+  call void @llvm.loongarch.lsx.vstelm.w(<4 x i32> %va, ptr %p, i32 512, i32 3)
   ret void
 }
 
-define void @lsx_vstelm_w_idx_lo(<4 x i32> %va, i8* %p) nounwind {
+define void @lsx_vstelm_w_idx_lo(<4 x i32> %va, ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lsx.vstelm.w: argument out of range or not a multiple of 4.
 entry:
-  call void @llvm.loongarch.lsx.vstelm.w(<4 x i32> %va, i8* %p, i32 4, i32 -1)
+  call void @llvm.loongarch.lsx.vstelm.w(<4 x i32> %va, ptr %p, i32 4, i32 -1)
   ret void
 }
 
-define void @lsx_vstelm_w_idx_hi(<4 x i32> %va, i8* %p) nounwind {
+define void @lsx_vstelm_w_idx_hi(<4 x i32> %va, ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lsx.vstelm.w: argument out of range or not a multiple of 4.
 entry:
-  call void @llvm.loongarch.lsx.vstelm.w(<4 x i32> %va, i8* %p, i32 4, i32 4)
+  call void @llvm.loongarch.lsx.vstelm.w(<4 x i32> %va, ptr %p, i32 4, i32 4)
   ret void
 }
 
-declare void @llvm.loongarch.lsx.vstelm.d(<2 x i64>, i8*, i32, i32)
+declare void @llvm.loongarch.lsx.vstelm.d(<2 x i64>, ptr, i32, i32)
 
-define void @lsx_vstelm_d_lo(<2 x i64> %va, i8* %p) nounwind {
+define void @lsx_vstelm_d_lo(<2 x i64> %va, ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lsx.vstelm.d: argument out of range or not a multiple of 8.
 entry:
-  call void @llvm.loongarch.lsx.vstelm.d(<2 x i64> %va, i8* %p, i32 -1032, i32 1)
+  call void @llvm.loongarch.lsx.vstelm.d(<2 x i64> %va, ptr %p, i32 -1032, i32 1)
   ret void
 }
 
-define void @lsx_vstelm_d_hi(<2 x i64> %va, i8* %p) nounwind {
+define void @lsx_vstelm_d_hi(<2 x i64> %va, ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lsx.vstelm.d: argument out of range or not a multiple of 8.
 entry:
-  call void @llvm.loongarch.lsx.vstelm.d(<2 x i64> %va, i8* %p, i32 1024, i32 1)
+  call void @llvm.loongarch.lsx.vstelm.d(<2 x i64> %va, ptr %p, i32 1024, i32 1)
   ret void
 }
 
-define void @lsx_vstelm_d_idx_lo(<2 x i64> %va, i8* %p) nounwind {
+define void @lsx_vstelm_d_idx_lo(<2 x i64> %va, ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lsx.vstelm.d: argument out of range or not a multiple of 8.
 entry:
-  call void @llvm.loongarch.lsx.vstelm.d(<2 x i64> %va, i8* %p, i32 8, i32 -1)
+  call void @llvm.loongarch.lsx.vstelm.d(<2 x i64> %va, ptr %p, i32 8, i32 -1)
   ret void
 }
 
-define void @lsx_vstelm_d_idx_hi(<2 x i64> %va, i8* %p) nounwind {
+define void @lsx_vstelm_d_idx_hi(<2 x i64> %va, ptr %p) nounwind {
 ; CHECK: llvm.loongarch.lsx.vstelm.d: argument out of range or not a multiple of 8.
 entry:
-  call void @llvm.loongarch.lsx.vstelm.d(<2 x i64> %va, i8* %p, i32 8, i32 2)
+  call void @llvm.loongarch.lsx.vstelm.d(<2 x i64> %va, ptr %p, i32 8, i32 2)
   ret void
 }

diff  --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-stelm-non-imm.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-stelm-non-imm.ll
index f53932f790355..a8a74819c2049 100644
--- a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-stelm-non-imm.ll
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-stelm-non-imm.ll
@@ -1,65 +1,65 @@
 ; RUN: not llc --mtriple=loongarch64 --mattr=+lsx < %s 2>&1 | FileCheck %s
 
-declare void @llvm.loongarch.lsx.vstelm.b(<16 x i8>, i8*, i32, i32)
+declare void @llvm.loongarch.lsx.vstelm.b(<16 x i8>, ptr, i32, i32)
 
-define void @lsx_vstelm_b(<16 x i8> %va, i8* %p, i32 %b) nounwind {
+define void @lsx_vstelm_b(<16 x i8> %va, ptr %p, i32 %b) nounwind {
 ; CHECK: immarg operand has non-immediate parameter
 entry:
-  call void @llvm.loongarch.lsx.vstelm.b(<16 x i8> %va, i8* %p, i32 %b, i32 1)
+  call void @llvm.loongarch.lsx.vstelm.b(<16 x i8> %va, ptr %p, i32 %b, i32 1)
   ret void
 }
 
-define void @lsx_vstelm_b_idx(<16 x i8> %va, i8* %p, i32 %b) nounwind {
+define void @lsx_vstelm_b_idx(<16 x i8> %va, ptr %p, i32 %b) nounwind {
 ; CHECK: immarg operand has non-immediate parameter
 entry:
-  call void @llvm.loongarch.lsx.vstelm.b(<16 x i8> %va, i8* %p, i32 1, i32 %b)
+  call void @llvm.loongarch.lsx.vstelm.b(<16 x i8> %va, ptr %p, i32 1, i32 %b)
   ret void
 }
 
-declare void @llvm.loongarch.lsx.vstelm.h(<8 x i16>, i8*, i32, i32)
+declare void @llvm.loongarch.lsx.vstelm.h(<8 x i16>, ptr, i32, i32)
 
-define void @lsx_vstelm_h(<8 x i16> %va, i8* %p, i32 %b) nounwind {
+define void @lsx_vstelm_h(<8 x i16> %va, ptr %p, i32 %b) nounwind {
 ; CHECK: immarg operand has non-immediate parameter
 entry:
-  call void @llvm.loongarch.lsx.vstelm.h(<8 x i16> %va, i8* %p, i32 %b, i32 1)
+  call void @llvm.loongarch.lsx.vstelm.h(<8 x i16> %va, ptr %p, i32 %b, i32 1)
   ret void
 }
 
-define void @lsx_vstelm_h_idx(<8 x i16> %va, i8* %p, i32 %b) nounwind {
+define void @lsx_vstelm_h_idx(<8 x i16> %va, ptr %p, i32 %b) nounwind {
 ; CHECK: immarg operand has non-immediate parameter
 entry:
-  call void @llvm.loongarch.lsx.vstelm.h(<8 x i16> %va, i8* %p, i32 2, i32 %b)
+  call void @llvm.loongarch.lsx.vstelm.h(<8 x i16> %va, ptr %p, i32 2, i32 %b)
   ret void
 }
 
-declare void @llvm.loongarch.lsx.vstelm.w(<4 x i32>, i8*, i32, i32)
+declare void @llvm.loongarch.lsx.vstelm.w(<4 x i32>, ptr, i32, i32)
 
-define void @lsx_vstelm_w(<4 x i32> %va, i8* %p, i32 %b) nounwind {
+define void @lsx_vstelm_w(<4 x i32> %va, ptr %p, i32 %b) nounwind {
 ; CHECK: immarg operand has non-immediate parameter
 entry:
-  call void @llvm.loongarch.lsx.vstelm.w(<4 x i32> %va, i8* %p, i32 %b, i32 1)
+  call void @llvm.loongarch.lsx.vstelm.w(<4 x i32> %va, ptr %p, i32 %b, i32 1)
   ret void
 }
 
-define void @lsx_vstelm_w_idx(<4 x i32> %va, i8* %p, i32 %b) nounwind {
+define void @lsx_vstelm_w_idx(<4 x i32> %va, ptr %p, i32 %b) nounwind {
 ; CHECK: immarg operand has non-immediate parameter
 entry:
-  call void @llvm.loongarch.lsx.vstelm.w(<4 x i32> %va, i8* %p, i32 4, i32 %b)
+  call void @llvm.loongarch.lsx.vstelm.w(<4 x i32> %va, ptr %p, i32 4, i32 %b)
   ret void
 }
 
-declare void @llvm.loongarch.lsx.vstelm.d(<2 x i64>, i8*, i32, i32)
+declare void @llvm.loongarch.lsx.vstelm.d(<2 x i64>, ptr, i32, i32)
 
-define void @lsx_vstelm_d(<2 x i64> %va, i8* %p, i32 %b) nounwind {
+define void @lsx_vstelm_d(<2 x i64> %va, ptr %p, i32 %b) nounwind {
 ; CHECK: immarg operand has non-immediate parameter
 entry:
-  call void @llvm.loongarch.lsx.vstelm.d(<2 x i64> %va, i8* %p, i32 %b, i32 1)
+  call void @llvm.loongarch.lsx.vstelm.d(<2 x i64> %va, ptr %p, i32 %b, i32 1)
   ret void
 }
 
-define void @lsx_vstelm_d_idx(<2 x i64> %va, i8* %p, i32 %b) nounwind {
+define void @lsx_vstelm_d_idx(<2 x i64> %va, ptr %p, i32 %b) nounwind {
 ; CHECK: immarg operand has non-immediate parameter
 entry:
-  call void @llvm.loongarch.lsx.vstelm.d(<2 x i64> %va, i8* %p, i32 8, i32 %b)
+  call void @llvm.loongarch.lsx.vstelm.d(<2 x i64> %va, ptr %p, i32 8, i32 %b)
   ret void
 }

diff  --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-stelm.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-stelm.ll
index 6b9e7a9d7462e..4f8412be9579c 100644
--- a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-stelm.ll
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-stelm.ll
@@ -1,50 +1,50 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
 
-declare void @llvm.loongarch.lsx.vstelm.b(<16 x i8>, i8*, i32, i32)
+declare void @llvm.loongarch.lsx.vstelm.b(<16 x i8>, ptr, i32, i32)
 
-define void @lsx_vstelm_b(<16 x i8> %va, i8* %p) nounwind {
+define void @lsx_vstelm_b(<16 x i8> %va, ptr %p) nounwind {
 ; CHECK-LABEL: lsx_vstelm_b:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vstelm.b $vr0, $a0, 1, 15
 ; CHECK-NEXT:    ret
 entry:
-  call void @llvm.loongarch.lsx.vstelm.b(<16 x i8> %va, i8* %p, i32 1, i32 15)
+  call void @llvm.loongarch.lsx.vstelm.b(<16 x i8> %va, ptr %p, i32 1, i32 15)
   ret void
 }
 
-declare void @llvm.loongarch.lsx.vstelm.h(<8 x i16>, i8*, i32, i32)
+declare void @llvm.loongarch.lsx.vstelm.h(<8 x i16>, ptr, i32, i32)
 
-define void @lsx_vstelm_h(<8 x i16> %va, i8* %p) nounwind {
+define void @lsx_vstelm_h(<8 x i16> %va, ptr %p) nounwind {
 ; CHECK-LABEL: lsx_vstelm_h:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vstelm.h $vr0, $a0, 2, 7
 ; CHECK-NEXT:    ret
 entry:
-  call void @llvm.loongarch.lsx.vstelm.h(<8 x i16> %va, i8* %p, i32 2, i32 7)
+  call void @llvm.loongarch.lsx.vstelm.h(<8 x i16> %va, ptr %p, i32 2, i32 7)
   ret void
 }
 
-declare void @llvm.loongarch.lsx.vstelm.w(<4 x i32>, i8*, i32, i32)
+declare void @llvm.loongarch.lsx.vstelm.w(<4 x i32>, ptr, i32, i32)
 
-define void @lsx_vstelm_w(<4 x i32> %va, i8* %p) nounwind {
+define void @lsx_vstelm_w(<4 x i32> %va, ptr %p) nounwind {
 ; CHECK-LABEL: lsx_vstelm_w:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vstelm.w $vr0, $a0, 4, 3
 ; CHECK-NEXT:    ret
 entry:
-  call void @llvm.loongarch.lsx.vstelm.w(<4 x i32> %va, i8* %p, i32 4, i32 3)
+  call void @llvm.loongarch.lsx.vstelm.w(<4 x i32> %va, ptr %p, i32 4, i32 3)
   ret void
 }
 
-declare void @llvm.loongarch.lsx.vstelm.d(<2 x i64>, i8*, i32, i32)
+declare void @llvm.loongarch.lsx.vstelm.d(<2 x i64>, ptr, i32, i32)
 
-define void @lsx_vstelm_d(<2 x i64> %va, i8* %p) nounwind {
+define void @lsx_vstelm_d(<2 x i64> %va, ptr %p) nounwind {
 ; CHECK-LABEL: lsx_vstelm_d:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vstelm.d $vr0, $a0, 8, 1
 ; CHECK-NEXT:    ret
 entry:
-  call void @llvm.loongarch.lsx.vstelm.d(<2 x i64> %va, i8* %p, i32 8, i32 1)
+  call void @llvm.loongarch.lsx.vstelm.d(<2 x i64> %va, ptr %p, i32 8, i32 1)
   ret void
 }

diff  --git a/llvm/test/CodeGen/LoongArch/tail-calls.ll b/llvm/test/CodeGen/LoongArch/tail-calls.ll
index 8f11e03431b92..52bdd230816f5 100644
--- a/llvm/test/CodeGen/LoongArch/tail-calls.ll
+++ b/llvm/test/CodeGen/LoongArch/tail-calls.ll
@@ -15,7 +15,7 @@ entry:
 ;; Perform tail call optimization for external symbol.
 ;; Bytes copied should be large enough, otherwise the memcpy call would be optimized to multiple ld/st insns.
 @dest = global [2 x i8] zeroinitializer
-declare void @llvm.memcpy.p0i8.p0i8.i32(ptr, ptr, i32, i1)
+declare void @llvm.memcpy.p0.p0.i32(ptr, ptr, i32, i1)
 define void @caller_extern(ptr %src) optsize {
 ; CHECK-LABEL: caller_extern:
 ; CHECK:       # %bb.0: # %entry
@@ -25,7 +25,7 @@ define void @caller_extern(ptr %src) optsize {
 ; CHECK-NEXT:    ori $a2, $zero, 33
 ; CHECK-NEXT:    b %plt(memcpy)
 entry:
-  tail call void @llvm.memcpy.p0i8.p0i8.i32(ptr getelementptr inbounds ([2 x i8], ptr @dest, i32 0, i32 0), ptr %src, i32 33, i1 false)
+  tail call void @llvm.memcpy.p0.p0.i32(ptr @dest, ptr %src, i32 33, i1 false)
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/MIR/AArch64/expected-target-flag-name.mir b/llvm/test/CodeGen/MIR/AArch64/expected-target-flag-name.mir
index 31489ab356e01..e6f65a3f577d3 100644
--- a/llvm/test/CodeGen/MIR/AArch64/expected-target-flag-name.mir
+++ b/llvm/test/CodeGen/MIR/AArch64/expected-target-flag-name.mir
@@ -7,7 +7,7 @@
 
   define i32 @sub_small() {
   entry:
-    %val32 = load i32, i32* @var_i32
+    %val32 = load i32, ptr @var_i32
     ret i32 %val32
   }
 

diff  --git a/llvm/test/CodeGen/MIR/AArch64/invalid-target-flag-name.mir b/llvm/test/CodeGen/MIR/AArch64/invalid-target-flag-name.mir
index 7886ae21793e6..7285a65f0296b 100644
--- a/llvm/test/CodeGen/MIR/AArch64/invalid-target-flag-name.mir
+++ b/llvm/test/CodeGen/MIR/AArch64/invalid-target-flag-name.mir
@@ -7,7 +7,7 @@
 
   define i32 @sub_small() {
   entry:
-    %val32 = load i32, i32* @var_i32
+    %val32 = load i32, ptr @var_i32
     ret i32 %val32
   }
 

diff  --git a/llvm/test/CodeGen/MIR/AArch64/machine-metadata-error.mir b/llvm/test/CodeGen/MIR/AArch64/machine-metadata-error.mir
index faf9c6a73ab03..65afb13b50753 100644
--- a/llvm/test/CodeGen/MIR/AArch64/machine-metadata-error.mir
+++ b/llvm/test/CodeGen/MIR/AArch64/machine-metadata-error.mir
@@ -5,7 +5,7 @@
   target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
   target triple = "aarch64-unknown-linux-gnu"
 
-  define i32 @test_memcpy(i32* nocapture %p, i32* nocapture readonly %q) {
+  define i32 @test_memcpy(ptr nocapture %p, ptr nocapture readonly %q) {
     ret i32 0
   }
 ...

diff  --git a/llvm/test/CodeGen/MIR/AArch64/machine-metadata.mir b/llvm/test/CodeGen/MIR/AArch64/machine-metadata.mir
index ab248b8249f2e..a0764ae25b492 100644
--- a/llvm/test/CodeGen/MIR/AArch64/machine-metadata.mir
+++ b/llvm/test/CodeGen/MIR/AArch64/machine-metadata.mir
@@ -6,49 +6,49 @@
   target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
   target triple = "aarch64-unknown-linux-gnu"
 
-  define i32 @test_memcpy(i32* nocapture %p, i32* nocapture readonly %q) {
-    %p0 = bitcast i32* %p to i8*
-    %add.ptr = getelementptr inbounds i32, i32* %p, i64 4
-    %p1 = bitcast i32* %add.ptr to i8*
-    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 4 dereferenceable(16) %p0, i8* noundef nonnull align 4 dereferenceable(16) %p1, i64 16, i1 false), !alias.scope !0, !noalias !3
-    %v0 = load i32, i32* %q, align 4, !alias.scope !3, !noalias !0
-    %q1 = getelementptr inbounds i32, i32* %q, i64 1
-    %v1 = load i32, i32* %q1, align 4, !alias.scope !3, !noalias !0
+  define i32 @test_memcpy(ptr nocapture %p, ptr nocapture readonly %q) {
+    %p0 = bitcast ptr %p to ptr
+    %add.ptr = getelementptr inbounds i32, ptr %p, i64 4
+    %p1 = bitcast ptr %add.ptr to ptr
+    tail call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 4 dereferenceable(16) %p0, ptr noundef nonnull align 4 dereferenceable(16) %p1, i64 16, i1 false), !alias.scope !0, !noalias !3
+    %v0 = load i32, ptr %q, align 4, !alias.scope !3, !noalias !0
+    %q1 = getelementptr inbounds i32, ptr %q, i64 1
+    %v1 = load i32, ptr %q1, align 4, !alias.scope !3, !noalias !0
     %add = add i32 %v0, %v1
     ret i32 %add
   }
 
-  define i32 @test_memcpy_inline(i32* nocapture %p, i32* nocapture readonly %q) {
-    %p0 = bitcast i32* %p to i8*
-    %add.ptr = getelementptr inbounds i32, i32* %p, i64 4
-    %p1 = bitcast i32* %add.ptr to i8*
-    tail call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* noundef nonnull align 4 dereferenceable(16) %p0, i8* noundef nonnull align 4 dereferenceable(16) %p1, i64 16, i1 false), !alias.scope !0, !noalias !3
-    %v0 = load i32, i32* %q, align 4, !alias.scope !3, !noalias !0
-    %q1 = getelementptr inbounds i32, i32* %q, i64 1
-    %v1 = load i32, i32* %q1, align 4, !alias.scope !3, !noalias !0
+  define i32 @test_memcpy_inline(ptr nocapture %p, ptr nocapture readonly %q) {
+    %p0 = bitcast ptr %p to ptr
+    %add.ptr = getelementptr inbounds i32, ptr %p, i64 4
+    %p1 = bitcast ptr %add.ptr to ptr
+    tail call void @llvm.memcpy.inline.p0.p0.i64(ptr noundef nonnull align 4 dereferenceable(16) %p0, ptr noundef nonnull align 4 dereferenceable(16) %p1, i64 16, i1 false), !alias.scope !0, !noalias !3
+    %v0 = load i32, ptr %q, align 4, !alias.scope !3, !noalias !0
+    %q1 = getelementptr inbounds i32, ptr %q, i64 1
+    %v1 = load i32, ptr %q1, align 4, !alias.scope !3, !noalias !0
     %add = add i32 %v0, %v1
     ret i32 %add
   }
 
-  define i32 @test_mempcpy(i32* nocapture %p, i32* nocapture readonly %q) {
-    %p0 = bitcast i32* %p to i8*
-    %add.ptr = getelementptr inbounds i32, i32* %p, i64 4
-    %p1 = bitcast i32* %add.ptr to i8*
-    %call = tail call i8* @mempcpy(i8* noundef nonnull align 4 dereferenceable(16) %p0, i8* noundef nonnull align 4 dereferenceable(16) %p1, i64 16), !alias.scope !0, !noalias !3
-    %v0 = load i32, i32* %q, align 4, !alias.scope !3, !noalias !0
-    %q1 = getelementptr inbounds i32, i32* %q, i64 1
-    %v1 = load i32, i32* %q1, align 4, !alias.scope !3, !noalias !0
+  define i32 @test_mempcpy(ptr nocapture %p, ptr nocapture readonly %q) {
+    %p0 = bitcast ptr %p to ptr
+    %add.ptr = getelementptr inbounds i32, ptr %p, i64 4
+    %p1 = bitcast ptr %add.ptr to ptr
+    %call = tail call ptr @mempcpy(ptr noundef nonnull align 4 dereferenceable(16) %p0, ptr noundef nonnull align 4 dereferenceable(16) %p1, i64 16), !alias.scope !0, !noalias !3
+    %v0 = load i32, ptr %q, align 4, !alias.scope !3, !noalias !0
+    %q1 = getelementptr inbounds i32, ptr %q, i64 1
+    %v1 = load i32, ptr %q1, align 4, !alias.scope !3, !noalias !0
     %add = add i32 %v0, %v1
     ret i32 %add
   }
 
   ; Function Attrs: argmemonly nofree nounwind willreturn
-  declare void @llvm.memcpy.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i64, i1 immarg) #0
+  declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg) #0
 
   ; Function Attrs: argmemonly nofree nounwind willreturn
-  declare void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i64 immarg, i1 immarg) #0
+  declare void @llvm.memcpy.inline.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64 immarg, i1 immarg) #0
 
-  declare i8* @mempcpy(i8*, i8*, i64)
+  declare ptr @mempcpy(ptr, ptr, i64)
 
   attributes #0 = { argmemonly nofree nounwind willreturn }
 

diff  --git a/llvm/test/CodeGen/MIR/AArch64/stack-object-local-offset.mir b/llvm/test/CodeGen/MIR/AArch64/stack-object-local-offset.mir
index b4dc46aec2ba7..a86826ce26219 100644
--- a/llvm/test/CodeGen/MIR/AArch64/stack-object-local-offset.mir
+++ b/llvm/test/CodeGen/MIR/AArch64/stack-object-local-offset.mir
@@ -2,14 +2,14 @@
 
 --- |
   @var = global i64 0
-  @local_addr = global i64* null
+  @local_addr = global ptr null
 
   define void @stack_local() {
   entry:
     %local_var = alloca i64
-    %val = load i64, i64* @var
-    store i64 %val, i64* %local_var
-    store i64* %local_var, i64** @local_addr
+    %val = load i64, ptr @var
+    store i64 %val, ptr %local_var
+    store ptr %local_var, ptr @local_addr
     ret void
   }
 ...

diff  --git a/llvm/test/CodeGen/MIR/AArch64/swp.mir b/llvm/test/CodeGen/MIR/AArch64/swp.mir
index 47a00f12efb74..535cdb5f6aac5 100644
--- a/llvm/test/CodeGen/MIR/AArch64/swp.mir
+++ b/llvm/test/CodeGen/MIR/AArch64/swp.mir
@@ -1,9 +1,9 @@
 # RUN: llc -mtriple=aarch64-none-linux-gnu -run-pass none -o - %s | FileCheck %s
 
 --- |
-  define i32 @swp(i32* %addr) #0 {
+  define i32 @swp(ptr %addr) #0 {
   entry:
-    %0 = atomicrmw xchg i32* %addr, i32 1 monotonic
+    %0 = atomicrmw xchg ptr %addr, i32 1 monotonic
     ret i32 %0
   }
 

diff  --git a/llvm/test/CodeGen/MIR/AArch64/target-flags.mir b/llvm/test/CodeGen/MIR/AArch64/target-flags.mir
index 16f9b43ca2f33..b0127d1189a0b 100644
--- a/llvm/test/CodeGen/MIR/AArch64/target-flags.mir
+++ b/llvm/test/CodeGen/MIR/AArch64/target-flags.mir
@@ -7,12 +7,12 @@
 
   define void @sub_small() {
   entry:
-    %val32 = load i32, i32* @var_i32
+    %val32 = load i32, ptr @var_i32
     %newval32 = sub i32 %val32, 4095
-    store i32 %newval32, i32* @var_i32
-    %val64 = load i64, i64* @var_i64
+    store i32 %newval32, ptr @var_i32
+    %val64 = load i64, ptr @var_i64
     %newval64 = sub i64 %val64, 52
-    store i64 %newval64, i64* @var_i64
+    store i64 %newval64, ptr @var_i64
     ret void
   }
 

diff  --git a/llvm/test/CodeGen/MIR/AArch64/unnamed-stack.ll b/llvm/test/CodeGen/MIR/AArch64/unnamed-stack.ll
index c7e87e3f62a6a..69e54ba5cdb4f 100644
--- a/llvm/test/CodeGen/MIR/AArch64/unnamed-stack.ll
+++ b/llvm/test/CodeGen/MIR/AArch64/unnamed-stack.ll
@@ -7,7 +7,7 @@ entry:
   ; CHECK-NEXT:   - { id: 0, name: '',
   ; CHECK:      %0:_(p0) = G_FRAME_INDEX %stack.0
   %0 = alloca i16
-  %1 = load i16, i16* %0
+  %1 = load i16, ptr %0
   ret i16 %1
 }
 
@@ -18,6 +18,6 @@ entry:
   ; CHECK-NEXT:   - { id: 0, name: ptr,
   ; CHECK:      %0:_(p0) = G_FRAME_INDEX %stack.0.ptr
   %ptr = alloca i16
-  %0 = load i16, i16* %ptr
+  %0 = load i16, ptr %ptr
   ret i16 %0
 }

diff  --git a/llvm/test/CodeGen/MIR/AMDGPU/expected-target-index-name.mir b/llvm/test/CodeGen/MIR/AMDGPU/expected-target-index-name.mir
index 6c81e3b5380d2..d5d3777377712 100644
--- a/llvm/test/CodeGen/MIR/AMDGPU/expected-target-index-name.mir
+++ b/llvm/test/CodeGen/MIR/AMDGPU/expected-target-index-name.mir
@@ -6,11 +6,11 @@
 
   @float_gv = internal unnamed_addr addrspace(4) constant [5 x float] [float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00, float 4.000000e+00], align 4
 
-  define amdgpu_kernel void @float(float addrspace(1)* %out, i32 %index) #0 {
+  define amdgpu_kernel void @float(ptr addrspace(1) %out, i32 %index) #0 {
   entry:
-    %0 = getelementptr inbounds [5 x float], [5 x float] addrspace(4)* @float_gv, i32 0, i32 %index
-    %1 = load float, float addrspace(4)* %0
-    store float %1, float addrspace(1)* %out
+    %0 = getelementptr inbounds [5 x float], ptr addrspace(4) @float_gv, i32 0, i32 %index
+    %1 = load float, ptr addrspace(4) %0
+    store float %1, ptr addrspace(1) %out
     ret void
   }
 

diff  --git a/llvm/test/CodeGen/MIR/AMDGPU/invalid-target-index-operand.mir b/llvm/test/CodeGen/MIR/AMDGPU/invalid-target-index-operand.mir
index 98c090edcffa2..ebefeddc7abfd 100644
--- a/llvm/test/CodeGen/MIR/AMDGPU/invalid-target-index-operand.mir
+++ b/llvm/test/CodeGen/MIR/AMDGPU/invalid-target-index-operand.mir
@@ -6,11 +6,11 @@
 
   @float_gv = internal unnamed_addr addrspace(4) constant [5 x float] [float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00, float 4.000000e+00], align 4
 
-  define amdgpu_kernel void @float(float addrspace(1)* %out, i32 %index) #0 {
+  define amdgpu_kernel void @float(ptr addrspace(1) %out, i32 %index) #0 {
   entry:
-    %0 = getelementptr inbounds [5 x float], [5 x float] addrspace(4)* @float_gv, i32 0, i32 %index
-    %1 = load float, float addrspace(4)* %0
-    store float %1, float addrspace(1)* %out
+    %0 = getelementptr inbounds [5 x float], ptr addrspace(4) @float_gv, i32 0, i32 %index
+    %1 = load float, ptr addrspace(4) %0
+    store float %1, ptr addrspace(1) %out
     ret void
   }
 

diff  --git a/llvm/test/CodeGen/MIR/AMDGPU/machine-metadata-error.mir b/llvm/test/CodeGen/MIR/AMDGPU/machine-metadata-error.mir
index 60dabbd7178a5..9af81a6ff696b 100644
--- a/llvm/test/CodeGen/MIR/AMDGPU/machine-metadata-error.mir
+++ b/llvm/test/CodeGen/MIR/AMDGPU/machine-metadata-error.mir
@@ -5,7 +5,7 @@
   target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7"
   target triple = "amdgcn-amd-amdhsa"
 
-  define i32 @test_memcpy(i32 addrspace(1)* nocapture %p, i32 addrspace(1)* nocapture readonly %q) #0 {
+  define i32 @test_memcpy(ptr addrspace(1) nocapture %p, ptr addrspace(1) nocapture readonly %q) #0 {
     ret i32 0
   }
 ...

diff  --git a/llvm/test/CodeGen/MIR/AMDGPU/machine-metadata.mir b/llvm/test/CodeGen/MIR/AMDGPU/machine-metadata.mir
index 0eaf663450425..b54ae64032d42 100644
--- a/llvm/test/CodeGen/MIR/AMDGPU/machine-metadata.mir
+++ b/llvm/test/CodeGen/MIR/AMDGPU/machine-metadata.mir
@@ -6,26 +6,26 @@
   target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7"
   target triple = "amdgcn-amd-amdhsa"
 
-  define i32 @test_memcpy(i32 addrspace(1)* nocapture %p, i32 addrspace(1)* nocapture readonly %q) #0 {
-    %p0 = bitcast i32 addrspace(1)* %p to i8 addrspace(1)*
-    %add.ptr = getelementptr inbounds i32, i32 addrspace(1)* %p, i64 4
-    %p1 = bitcast i32 addrspace(1)* %add.ptr to i8 addrspace(1)*
-    tail call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* noundef nonnull align 4 dereferenceable(16) %p0, i8 addrspace(1)* noundef nonnull align 4 dereferenceable(16) %p1, i64 16, i1 false), !alias.scope !0, !noalias !3
-    %1 = bitcast i32 addrspace(1)* %q to <2 x i32> addrspace(1)*
-    %2 = load <2 x i32>, <2 x i32> addrspace(1)* %1, align 4, !alias.scope !3, !noalias !0
+  define i32 @test_memcpy(ptr addrspace(1) nocapture %p, ptr addrspace(1) nocapture readonly %q) #0 {
+    %p0 = bitcast ptr addrspace(1) %p to ptr addrspace(1)
+    %add.ptr = getelementptr inbounds i32, ptr addrspace(1) %p, i64 4
+    %p1 = bitcast ptr addrspace(1) %add.ptr to ptr addrspace(1)
+    tail call void @llvm.memcpy.p1.p1.i64(ptr addrspace(1) noundef nonnull align 4 dereferenceable(16) %p0, ptr addrspace(1) noundef nonnull align 4 dereferenceable(16) %p1, i64 16, i1 false), !alias.scope !0, !noalias !3
+    %1 = bitcast ptr addrspace(1) %q to ptr addrspace(1)
+    %2 = load <2 x i32>, ptr addrspace(1) %1, align 4, !alias.scope !3, !noalias !0
     %v01 = extractelement <2 x i32> %2, i32 0
     %v12 = extractelement <2 x i32> %2, i32 1
     %add = add i32 %v01, %v12
     ret i32 %add
   }
 
-  define i32 @test_memcpy_inline(i32 addrspace(1)* nocapture %p, i32 addrspace(1)* nocapture readonly %q) #0 {
-    %p0 = bitcast i32 addrspace(1)* %p to i8 addrspace(1)*
-    %add.ptr = getelementptr inbounds i32, i32 addrspace(1)* %p, i64 4
-    %p1 = bitcast i32 addrspace(1)* %add.ptr to i8 addrspace(1)*
-    tail call void @llvm.memcpy.inline.p1i8.p1i8.i64(i8 addrspace(1)* noundef nonnull align 4 dereferenceable(16) %p0, i8 addrspace(1)* noundef nonnull align 4 dereferenceable(16) %p1, i64 16, i1 false), !alias.scope !0, !noalias !3
-    %1 = bitcast i32 addrspace(1)* %q to <2 x i32> addrspace(1)*
-    %2 = load <2 x i32>, <2 x i32> addrspace(1)* %1, align 4, !alias.scope !3, !noalias !0
+  define i32 @test_memcpy_inline(ptr addrspace(1) nocapture %p, ptr addrspace(1) nocapture readonly %q) #0 {
+    %p0 = bitcast ptr addrspace(1) %p to ptr addrspace(1)
+    %add.ptr = getelementptr inbounds i32, ptr addrspace(1) %p, i64 4
+    %p1 = bitcast ptr addrspace(1) %add.ptr to ptr addrspace(1)
+    tail call void @llvm.memcpy.inline.p1.p1.i64(ptr addrspace(1) noundef nonnull align 4 dereferenceable(16) %p0, ptr addrspace(1) noundef nonnull align 4 dereferenceable(16) %p1, i64 16, i1 false), !alias.scope !0, !noalias !3
+    %1 = bitcast ptr addrspace(1) %q to ptr addrspace(1)
+    %2 = load <2 x i32>, ptr addrspace(1) %1, align 4, !alias.scope !3, !noalias !0
     %v01 = extractelement <2 x i32> %2, i32 0
     %v12 = extractelement <2 x i32> %2, i32 1
     %add = add i32 %v01, %v12
@@ -33,10 +33,10 @@
   }
 
   ; Function Attrs: argmemonly nofree nounwind willreturn
-  declare void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* noalias nocapture writeonly, i8 addrspace(1)* noalias nocapture readonly, i64, i1 immarg) #1
+  declare void @llvm.memcpy.p1.p1.i64(ptr addrspace(1) noalias nocapture writeonly, ptr addrspace(1) noalias nocapture readonly, i64, i1 immarg) #1
 
   ; Function Attrs: argmemonly nofree nounwind willreturn
-  declare void @llvm.memcpy.inline.p1i8.p1i8.i64(i8 addrspace(1)* noalias nocapture writeonly, i8 addrspace(1)* noalias nocapture readonly, i64 immarg, i1 immarg) #1
+  declare void @llvm.memcpy.inline.p1.p1.i64(ptr addrspace(1) noalias nocapture writeonly, ptr addrspace(1) noalias nocapture readonly, i64 immarg, i1 immarg) #1
 
   ; Function Attrs: convergent nounwind willreturn
   declare { i1, i32 } @llvm.amdgcn.if.i32(i1) #2

diff  --git a/llvm/test/CodeGen/MIR/AMDGPU/mircanon-memoperands.mir b/llvm/test/CodeGen/MIR/AMDGPU/mircanon-memoperands.mir
index eb338fab4cfe0..af0f28f6b5d74 100644
--- a/llvm/test/CodeGen/MIR/AMDGPU/mircanon-memoperands.mir
+++ b/llvm/test/CodeGen/MIR/AMDGPU/mircanon-memoperands.mir
@@ -3,7 +3,7 @@
 --- |
   target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
 
-  define amdgpu_kernel void @f(i32 addrspace(1)* nocapture %arg) {
+  define amdgpu_kernel void @f(ptr addrspace(1) nocapture %arg) {
     unreachable
   }
 ...
@@ -33,11 +33,11 @@ body:             |
     ; CHECK-NEXT: %bb0_{{[0-9a-f]+}}__1:sreg_64_xexec = S_LOAD_DWORDX2_IMM
 
     %0 = COPY $sgpr4_sgpr5
-    %1 = S_LOAD_DWORDX2_IMM %0, 0, 0 :: (non-temporal dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`)
-    %2 = S_LOAD_DWORDX2_IMM %0, 0, 0 :: (             dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`)
-    %3 = S_LOAD_DWORDX2_IMM %0, 0, 0 :: (                             invariant load (s64) from `i64 addrspace(4)* undef`)
-    %4 = S_LOAD_DWORDX2_IMM %0, 0, 0 :: (                                       load (s64) from `i64 addrspace(4)* undef`)
-    %5 = S_LOAD_DWORDX2_IMM %0, 0, 0 :: (                                       load (s64) from `i64 addrspace(2)* undef`)
-    %6 = S_LOAD_DWORDX2_IMM %0, 0, 0 :: (                                       load (s64) from `i64 addrspace(1)* undef`)
+    %1 = S_LOAD_DWORDX2_IMM %0, 0, 0 :: (non-temporal dereferenceable invariant load (s64) from `ptr addrspace(4) undef`)
+    %2 = S_LOAD_DWORDX2_IMM %0, 0, 0 :: (             dereferenceable invariant load (s64) from `ptr addrspace(4) undef`)
+    %3 = S_LOAD_DWORDX2_IMM %0, 0, 0 :: (                             invariant load (s64) from `ptr addrspace(4) undef`)
+    %4 = S_LOAD_DWORDX2_IMM %0, 0, 0 :: (                                       load (s64) from `ptr addrspace(4) undef`)
+    %5 = S_LOAD_DWORDX2_IMM %0, 0, 0 :: (                                       load (s64) from `ptr addrspace(2) undef`)
+    %6 = S_LOAD_DWORDX2_IMM %0, 0, 0 :: (                                       load (s64) from `ptr addrspace(1) undef`)
 
 ...

diff  --git a/llvm/test/CodeGen/MIR/AMDGPU/syncscopes.mir b/llvm/test/CodeGen/MIR/AMDGPU/syncscopes.mir
index a9cb0e4e96ef5..c28a4405d488c 100644
--- a/llvm/test/CodeGen/MIR/AMDGPU/syncscopes.mir
+++ b/llvm/test/CodeGen/MIR/AMDGPU/syncscopes.mir
@@ -6,11 +6,11 @@
   target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
   target triple = "amdgcn-amd-amdhsa"
 
-  define void @syncscopes(i32 %agent, i32 addrspace(4)* %agent_out, i32 %workgroup, i32 addrspace(4)* %workgroup_out, i32 %wavefront, i32 addrspace(4)* %wavefront_out) #0 {
+  define void @syncscopes(i32 %agent, ptr addrspace(4) %agent_out, i32 %workgroup, ptr addrspace(4) %workgroup_out, i32 %wavefront, ptr addrspace(4) %wavefront_out) #0 {
   entry:
-    store atomic i32 %agent, i32 addrspace(4)* %agent_out syncscope("agent") seq_cst, align 4, !nontemporal !0
-    store atomic i32 %workgroup, i32 addrspace(4)* %workgroup_out syncscope("workgroup") seq_cst, align 4, !nontemporal !0
-    store atomic i32 %wavefront, i32 addrspace(4)* %wavefront_out syncscope("wavefront") seq_cst, align 4, !nontemporal !0
+    store atomic i32 %agent, ptr addrspace(4) %agent_out syncscope("agent") seq_cst, align 4, !nontemporal !0
+    store atomic i32 %workgroup, ptr addrspace(4) %workgroup_out syncscope("workgroup") seq_cst, align 4, !nontemporal !0
+    store atomic i32 %wavefront, ptr addrspace(4) %wavefront_out syncscope("wavefront") seq_cst, align 4, !nontemporal !0
     ret void
   }
 
@@ -74,14 +74,14 @@ body:             |
     liveins: $sgpr4_sgpr5
 
     S_WAITCNT 0
-    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM $sgpr4_sgpr5, 8, 0 :: (non-temporal dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`)
-    $sgpr6 = S_LOAD_DWORD_IMM $sgpr4_sgpr5, 0, 0 :: (non-temporal dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`)
-    $sgpr2_sgpr3 = S_LOAD_DWORDX2_IMM $sgpr4_sgpr5, 24, 0 :: (non-temporal dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`)
-    $sgpr7 = S_LOAD_DWORD_IMM $sgpr4_sgpr5, 16, 0 :: (non-temporal dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`)
-    $sgpr8 = S_LOAD_DWORD_IMM $sgpr4_sgpr5, 32, 0 :: (non-temporal dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`)
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM $sgpr4_sgpr5, 8, 0 :: (non-temporal dereferenceable invariant load (s64) from `ptr addrspace(4) undef`)
+    $sgpr6 = S_LOAD_DWORD_IMM $sgpr4_sgpr5, 0, 0 :: (non-temporal dereferenceable invariant load (s32) from `ptr addrspace(4) undef`)
+    $sgpr2_sgpr3 = S_LOAD_DWORDX2_IMM $sgpr4_sgpr5, 24, 0 :: (non-temporal dereferenceable invariant load (s64) from `ptr addrspace(4) undef`)
+    $sgpr7 = S_LOAD_DWORD_IMM $sgpr4_sgpr5, 16, 0 :: (non-temporal dereferenceable invariant load (s32) from `ptr addrspace(4) undef`)
+    $sgpr8 = S_LOAD_DWORD_IMM $sgpr4_sgpr5, 32, 0 :: (non-temporal dereferenceable invariant load (s32) from `ptr addrspace(4) undef`)
     S_WAITCNT 127
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1
-    $sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM killed $sgpr4_sgpr5, 40, 0 :: (non-temporal dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`)
+    $sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM killed $sgpr4_sgpr5, 40, 0 :: (non-temporal dereferenceable invariant load (s64) from `ptr addrspace(4) undef`)
     $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit killed $sgpr0_sgpr1, implicit $sgpr0_sgpr1, implicit $exec
     $vgpr2 = V_MOV_B32_e32 killed $sgpr6, implicit $exec, implicit $exec
     FLAT_STORE_DWORD killed $vgpr0_vgpr1, killed $vgpr2, 0, 19, implicit $exec, implicit $flat_scr :: (volatile non-temporal store syncscope("agent") seq_cst (s32) into %ir.agent_out)

diff  --git a/llvm/test/CodeGen/MIR/AMDGPU/target-index-operands.mir b/llvm/test/CodeGen/MIR/AMDGPU/target-index-operands.mir
index 8eba896f70c6b..651cfe01dc2d2 100644
--- a/llvm/test/CodeGen/MIR/AMDGPU/target-index-operands.mir
+++ b/llvm/test/CodeGen/MIR/AMDGPU/target-index-operands.mir
@@ -7,19 +7,19 @@
 
   @float_gv = internal unnamed_addr addrspace(4) constant [5 x float] [float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00, float 4.000000e+00], align 4
 
-  define amdgpu_kernel void @float(float addrspace(1)* %out, i32 %index) #0 {
+  define amdgpu_kernel void @float(ptr addrspace(1) %out, i32 %index) #0 {
   entry:
-    %0 = getelementptr inbounds [5 x float], [5 x float] addrspace(4)* @float_gv, i32 0, i32 %index
-    %1 = load float, float addrspace(4)* %0
-    store float %1, float addrspace(1)* %out
+    %0 = getelementptr inbounds [5 x float], ptr addrspace(4) @float_gv, i32 0, i32 %index
+    %1 = load float, ptr addrspace(4) %0
+    store float %1, ptr addrspace(1) %out
     ret void
   }
 
-  define amdgpu_kernel void @float2(float addrspace(1)* %out, i32 %index) #0 {
+  define amdgpu_kernel void @float2(ptr addrspace(1) %out, i32 %index) #0 {
   entry:
-    %0 = getelementptr inbounds [5 x float], [5 x float] addrspace(4)* @float_gv, i32 0, i32 %index
-    %1 = load float, float addrspace(4)* %0
-    store float %1, float addrspace(1)* %out
+    %0 = getelementptr inbounds [5 x float], ptr addrspace(4) @float_gv, i32 0, i32 %index
+    %1 = load float, ptr addrspace(4) %0
+    store float %1, ptr addrspace(1) %out
     ret void
   }
   attributes #0 = { nounwind }

diff  --git a/llvm/test/CodeGen/MIR/ARM/cfi-same-value.mir b/llvm/test/CodeGen/MIR/ARM/cfi-same-value.mir
index e007f45efb959..dc23b9a01bb2e 100644
--- a/llvm/test/CodeGen/MIR/ARM/cfi-same-value.mir
+++ b/llvm/test/CodeGen/MIR/ARM/cfi-same-value.mir
@@ -1,12 +1,12 @@
 # RUN: llc -mtriple=arm-linux-unknown-gnueabi -run-pass none -o - %s | FileCheck %s
 
 --- |
-  declare void @dummy_use(i32*, i32)
+  declare void @dummy_use(ptr, i32)
 
   define void @test_basic() #0 {
   entry:
     %mem = alloca i32, i32 10
-    call void @dummy_use(i32* %mem, i32 10)
+    call void @dummy_use(ptr %mem, i32 10)
     ret void
   }
 

diff  --git a/llvm/test/CodeGen/MIR/ARM/expected-closing-brace.mir b/llvm/test/CodeGen/MIR/ARM/expected-closing-brace.mir
index c7bc1c13a4c60..b5101951357be 100644
--- a/llvm/test/CodeGen/MIR/ARM/expected-closing-brace.mir
+++ b/llvm/test/CodeGen/MIR/ARM/expected-closing-brace.mir
@@ -16,7 +16,7 @@
     ret i32 %.
 
   if.else:
-    %b = load i32, i32* @G
+    %b = load i32, ptr @G
     %c = add i32 %b, 1
     br label %foo
   }

diff  --git a/llvm/test/CodeGen/MIR/ARM/thumb2-sub-sp-t3.mir b/llvm/test/CodeGen/MIR/ARM/thumb2-sub-sp-t3.mir
index 3f0ca25883d25..e8e35ef2f4d49 100644
--- a/llvm/test/CodeGen/MIR/ARM/thumb2-sub-sp-t3.mir
+++ b/llvm/test/CodeGen/MIR/ARM/thumb2-sub-sp-t3.mir
@@ -7,16 +7,16 @@
   define void @foo() #0 {
   entry:
     %v = alloca [4000 x i8], align 1
-    %s = alloca i8*, align 4
-    %0 = bitcast [4000 x i8]* %v to i8*
-    store i8* %0, i8** %s, align 4
-    %1 = load i8*, i8** %s, align 4
-    call void @bar(i8* %1)
+    %s = alloca ptr, align 4
+    %0 = bitcast ptr %v to ptr
+    store ptr %0, ptr %s, align 4
+    %1 = load ptr, ptr %s, align 4
+    call void @bar(ptr %1)
     ret void
   }
-  declare void @bar(i8*) #1
+  declare void @bar(ptr) #1
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #2
+  declare void @llvm.stackprotector(ptr, ptr) #2
 
   attributes #0 = { noinline nounwind optnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+armv7-a,+d32,+dsp,+fp64,+fpregs,+neon,+strict-align,+thumb-mode,+vfp2,+vfp2sp,+vfp3,+vfp3d16,+vfp3d16sp,+vfp3sp" "unsafe-fp-math"="false" "use-soft-float"="false" }
   attributes #1 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+armv7-a,+d32,+dsp,+fp64,+fpregs,+neon,+strict-align,+thumb-mode,+vfp2,+vfp2sp,+vfp3,+vfp3d16,+vfp3d16sp,+vfp3sp" "unsafe-fp-math"="false" "use-soft-float"="false" }

diff  --git a/llvm/test/CodeGen/MIR/Generic/frame-info.mir b/llvm/test/CodeGen/MIR/Generic/frame-info.mir
index c666482219e89..4a897a9ec5e3b 100644
--- a/llvm/test/CodeGen/MIR/Generic/frame-info.mir
+++ b/llvm/test/CodeGen/MIR/Generic/frame-info.mir
@@ -7,16 +7,16 @@
   define i32 @test(i32 %a) {
   entry:
     %b = alloca i32
-    store i32 %a, i32* %b
-    %c = load i32, i32* %b
+    store i32 %a, ptr %b
+    %c = load i32, ptr %b
     ret i32 %c
   }
 
   define i32 @test2(i32 %a) {
   entry:
     %b = alloca i32
-    store i32 %a, i32* %b
-    %c = load i32, i32* %b
+    store i32 %a, ptr %b
+    %c = load i32, ptr %b
     ret i32 %c
   }
 

diff  --git a/llvm/test/CodeGen/MIR/Generic/llvm-ir-error-reported.mir b/llvm/test/CodeGen/MIR/Generic/llvm-ir-error-reported.mir
index 15824cb2ca6d7..42c23b141c135 100644
--- a/llvm/test/CodeGen/MIR/Generic/llvm-ir-error-reported.mir
+++ b/llvm/test/CodeGen/MIR/Generic/llvm-ir-error-reported.mir
@@ -7,10 +7,10 @@
   ; CHECK: [[@LINE+3]]:15: use of undefined value '%a'
   define i32 @foo(i32 %x, i32 %y) {
     %z = alloca i32, align 4
-    store i32 %a, i32* %z, align 4
+    store i32 %a, ptr %z, align 4
     br label %Test
   Test:
-    %m = load i32, i32* %z, align 4
+    %m = load i32, ptr %z, align 4
     %cond = icmp eq i32 %y, %m
     br i1 %cond, label %IfEqual, label %IfUnequal
   IfEqual:

diff  --git a/llvm/test/CodeGen/MIR/Mips/memory-operands.mir b/llvm/test/CodeGen/MIR/Mips/memory-operands.mir
index 1cb228fdf31d2..c64b9dacd9a62 100644
--- a/llvm/test/CodeGen/MIR/Mips/memory-operands.mir
+++ b/llvm/test/CodeGen/MIR/Mips/memory-operands.mir
@@ -13,7 +13,7 @@
 
   define float @test2() #0 {
   entry:
-    %call = tail call float bitcast (float (...)* @g to float ()*)()
+    %call = tail call float @g()
     call void @__mips16_ret_sf(float %call)
     ret float %call
   }

diff  --git a/llvm/test/CodeGen/MIR/Mips/setRegClassOrRegBank.mir b/llvm/test/CodeGen/MIR/Mips/setRegClassOrRegBank.mir
index 80af292f13f22..f387a6cc604f7 100644
--- a/llvm/test/CodeGen/MIR/Mips/setRegClassOrRegBank.mir
+++ b/llvm/test/CodeGen/MIR/Mips/setRegClassOrRegBank.mir
@@ -10,7 +10,7 @@
 --- |
 
   declare <4 x i32> @llvm.mips.addvi.w(<4 x i32>, i32 immarg)
-  define void @add_v4i32_builtin_imm(<4 x i32>* %a, <4 x i32>* %c) { entry: ret void }
+  define void @add_v4i32_builtin_imm(ptr %a, ptr %c) { entry: ret void }
 
 ...
 ---

diff  --git a/llvm/test/CodeGen/MIR/PowerPC/unordered-implicit-registers.mir b/llvm/test/CodeGen/MIR/PowerPC/unordered-implicit-registers.mir
index 406180b59ceae..c39b506667788 100644
--- a/llvm/test/CodeGen/MIR/PowerPC/unordered-implicit-registers.mir
+++ b/llvm/test/CodeGen/MIR/PowerPC/unordered-implicit-registers.mir
@@ -2,11 +2,11 @@
 # PR24724
 
 --- |
-  define signext i32 @main(i32* %p) #0 {
+  define signext i32 @main(ptr %p) #0 {
   entry:
-    %0 = load i32, i32* %p, align 4
+    %0 = load i32, ptr %p, align 4
     %or = or i32 0, %0
-    store i32 %or, i32* %p, align 4
+    store i32 %or, ptr %p, align 4
     %lnot.1 = icmp eq i32 undef, 0
     %lnot.ext.1 = zext i1 %lnot.1 to i32
     %shr.i.1 = lshr i32 2072, %lnot.ext.1

diff  --git a/llvm/test/CodeGen/MIR/X86/block-address-operands.mir b/llvm/test/CodeGen/MIR/X86/block-address-operands.mir
index 49a6b39bc2d4d..6108bef31a5a6 100644
--- a/llvm/test/CodeGen/MIR/X86/block-address-operands.mir
+++ b/llvm/test/CodeGen/MIR/X86/block-address-operands.mir
@@ -4,13 +4,13 @@
 
 --- |
 
-  @addr = global i8* null
+  @addr = global ptr null
 
   define void @test() {
   entry:
-    store volatile i8* blockaddress(@test, %block), i8** @addr
-    %val = load volatile i8*, i8** @addr
-    indirectbr i8* %val, [label %block]
+    store volatile ptr blockaddress(@test, %block), ptr @addr
+    %val = load volatile ptr, ptr @addr
+    indirectbr ptr %val, [label %block]
 
   block:
     ret void
@@ -18,34 +18,34 @@
 
   define void @test2() {
   entry:
-    store volatile i8* blockaddress(@test2, %"quoted block"), i8** @addr
-    %val = load volatile i8*, i8** @addr
-    indirectbr i8* %val, [label %"quoted block"]
+    store volatile ptr blockaddress(@test2, %"quoted block"), ptr @addr
+    %val = load volatile ptr, ptr @addr
+    indirectbr ptr %val, [label %"quoted block"]
 
   "quoted block":
     ret void
   }
 
-  define void @slot_in_other_function(i8** %addr) {
+  define void @slot_in_other_function(ptr %addr) {
   entry:
-    store volatile i8* blockaddress(@test3, %0), i8** %addr
+    store volatile ptr blockaddress(@test3, %0), ptr %addr
     ret void
   }
 
   define void @test3() {
   entry:
-    store volatile i8* blockaddress(@test3, %0), i8** @addr
-    %val = load volatile i8*, i8** @addr
-    indirectbr i8* %val, [label %0]
+    store volatile ptr blockaddress(@test3, %0), ptr @addr
+    %val = load volatile ptr, ptr @addr
+    indirectbr ptr %val, [label %0]
 
     ret void
   }
 
   define void @test4() {
   entry:
-    store volatile i8* blockaddress(@test4, %block), i8** @addr
-    %val = load volatile i8*, i8** @addr
-    indirectbr i8* %val, [label %block]
+    store volatile ptr blockaddress(@test4, %block), ptr @addr
+    %val = load volatile ptr, ptr @addr
+    indirectbr ptr %val, [label %block]
 
   block:
     ret void

diff  --git a/llvm/test/CodeGen/MIR/X86/branch-folder-with-label.mir b/llvm/test/CodeGen/MIR/X86/branch-folder-with-label.mir
index c3d1836e82378..ce225d4567e91 100644
--- a/llvm/test/CodeGen/MIR/X86/branch-folder-with-label.mir
+++ b/llvm/test/CodeGen/MIR/X86/branch-folder-with-label.mir
@@ -50,15 +50,15 @@
     ret i32 %a
   }
   
-  define i32 @baz(i32* %out) local_unnamed_addr !dbg !4 {
+  define i32 @baz(ptr %out) local_unnamed_addr !dbg !4 {
   entry:
-    %0 = load i32, i32* %out, align 4
+    %0 = load i32, ptr %out, align 4
     %call = tail call i32 @foo(i32 %0), !dbg !9
     %cmp = icmp slt i32 %call, 0
     br i1 %cmp, label %cleanup, label %if.then1
   
   if.then1:                                         ; preds = %entry
-    store i32 1, i32* %out, align 4
+    store i32 1, ptr %out, align 4
     br label %cleanup
   
   cleanup:                                          ; preds = %if.then1, %entry
@@ -66,11 +66,11 @@
     ret i32 %retval.0
   }
   
-  define i32 @test(%struct.bar* nocapture readonly %s) local_unnamed_addr !dbg !11 {
+  define i32 @test(ptr nocapture readonly %s) local_unnamed_addr !dbg !11 {
   entry:
     %idx = alloca i32, align 4
     call void @llvm.dbg.label(metadata !20), !dbg !21
-    %call58 = call i32 @baz(i32* nonnull %idx), !dbg !22
+    %call58 = call i32 @baz(ptr nonnull %idx), !dbg !22
     %cmp69 = icmp slt i32 %call58, 0
     br i1 %cmp69, label %if.then, label %do.cond.lr.ph.lr.ph
   
@@ -79,7 +79,7 @@
   
   retry.loopexit:                                   ; preds = %lor.rhs
     call void @llvm.dbg.label(metadata !20), !dbg !21
-    %call5 = call i32 @baz(i32* nonnull %idx), !dbg !22
+    %call5 = call i32 @baz(ptr nonnull %idx), !dbg !22
     %cmp6 = icmp slt i32 %call5, 0
     br i1 %cmp6, label %if.then, label %do.cond
   
@@ -88,18 +88,18 @@
     ret i32 %call.lcssa
   
   do.cond:                                          ; preds = %retry.loopexit, %do.body.backedge, %do.cond.lr.ph.lr.ph
-    %0 = load i32, i32* %idx, align 4
+    %0 = load i32, ptr %idx, align 4
     %cmp1 = icmp slt i32 %0, 0
     br i1 %cmp1, label %do.body.backedge, label %lor.rhs
   
   lor.rhs:                                          ; preds = %do.cond
-    %1 = bitcast %struct.bar* %s to i32*
-    %2 = load i32, i32* %1, align 4
+    %1 = bitcast ptr %s to ptr
+    %2 = load i32, ptr %1, align 4
     %tobool = icmp eq i32 %2, 0
     br i1 %tobool, label %do.body.backedge, label %retry.loopexit
   
   do.body.backedge:                                 ; preds = %lor.rhs, %do.cond
-    %call = call i32 @baz(i32* nonnull %idx), !dbg !22
+    %call = call i32 @baz(ptr nonnull %idx), !dbg !22
     %cmp = icmp slt i32 %call, 0
     br i1 %cmp, label %if.then, label %do.cond
   }
@@ -108,7 +108,7 @@
   declare void @llvm.dbg.label(metadata) #0
   
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #1
+  declare void @llvm.stackprotector(ptr, ptr) #1
   
   attributes #0 = { nounwind readnone speculatable }
   attributes #1 = { nounwind }

diff  --git a/llvm/test/CodeGen/MIR/X86/callee-saved-info.mir b/llvm/test/CodeGen/MIR/X86/callee-saved-info.mir
index 606abdd156578..8337117e3718e 100644
--- a/llvm/test/CodeGen/MIR/X86/callee-saved-info.mir
+++ b/llvm/test/CodeGen/MIR/X86/callee-saved-info.mir
@@ -12,7 +12,7 @@
   define i32 @func(i32 %a) {
   entry:
     %b = alloca i32
-    store i32 %a, i32* %b
+    store i32 %a, ptr %b
     br label %check
 
   check:
@@ -20,10 +20,10 @@
     br i1 %comp, label %loop, label %exit
 
   loop:
-    %c = load i32, i32* %b
+    %c = load i32, ptr %b
     %d = call i32 @compute(i32 %c)
     %e = sub i32 %d, 1
-    store i32 %e, i32* %b
+    store i32 %e, ptr %b
     br label %check
 
   exit:

diff  --git a/llvm/test/CodeGen/MIR/X86/diexpr-win32.mir b/llvm/test/CodeGen/MIR/X86/diexpr-win32.mir
index 6950f16799aa2..b1bcf24f8c5f4 100644
--- a/llvm/test/CodeGen/MIR/X86/diexpr-win32.mir
+++ b/llvm/test/CodeGen/MIR/X86/diexpr-win32.mir
@@ -65,23 +65,23 @@
   target datalayout = "e-m:x-p:32:32-i64:64-f80:32-n8:16:32-a:0:32-S32"
   target triple = "i386-pc-windows-msvc19.0.24215"
 
-  %struct.string = type { i32, i32, i8* }
+  %struct.string = type { i32, i32, ptr }
 
-  define void @fun(%struct.string* noalias sret(%struct.string) %agg.result, %struct.string* noalias %str) !dbg !12 {
+  define void @fun(ptr noalias sret(%struct.string) %agg.result, ptr noalias %str) !dbg !12 {
   entry:
-    call void @llvm.dbg.value(metadata %struct.string* %agg.result, metadata !23, metadata !24), !dbg !25
-    call void @llvm.dbg.value(metadata %struct.string* %str, metadata !26, metadata !28), !dbg !25
-    %call = call dereferenceable(12) %struct.string* @getString(), !dbg !29
-    %0 = bitcast %struct.string* %agg.result to i8*, !dbg !29
-    %1 = bitcast %struct.string* %call to i8*, !dbg !29
-    call void @llvm.memcpy.p0i8.p0i8.i32(i8* %0, i8* %1, i32 12, i32 4, i1 false), !dbg !29
+    call void @llvm.dbg.value(metadata ptr %agg.result, metadata !23, metadata !24), !dbg !25
+    call void @llvm.dbg.value(metadata ptr %str, metadata !26, metadata !28), !dbg !25
+    %call = call dereferenceable(12) ptr @getString(), !dbg !29
+    %0 = bitcast ptr %agg.result to ptr, !dbg !29
+    %1 = bitcast ptr %call to ptr, !dbg !29
+    call void @llvm.memcpy.p0.p0.i32(ptr %0, ptr %1, i32 12, i32 4, i1 false), !dbg !29
     ret void, !dbg !30
   }
 
-  define i32 @len(%struct.string* %s, i32 %acc) !dbg !31 {
+  define i32 @len(ptr %s, i32 %acc) !dbg !31 {
   entry:
-    %0 = bitcast %struct.string* %s to i32*
-    %bytes = load i32, i32* %0, !dbg !34
+    %0 = bitcast ptr %s to ptr
+    %bytes = load i32, ptr %0, !dbg !34
     call void @llvm.dbg.declare(metadata i32 %bytes, metadata !35, metadata !28), !dbg !34
     %1 = add i32 %bytes, %acc, !dbg !36
     ret i32 %1, !dbg !36
@@ -93,13 +93,13 @@
   ; Function Attrs: nounwind readnone speculatable
   declare void @llvm.dbg.value(metadata, metadata, metadata) #0
 
-  declare dereferenceable(12) %struct.string* @getString()
+  declare dereferenceable(12) ptr @getString()
 
   ; Function Attrs: argmemonly nounwind
-  declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i32, i1) #1
+  declare void @llvm.memcpy.p0.p0.i32(ptr nocapture writeonly, ptr nocapture readonly, i32, i32, i1) #1
 
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #2
+  declare void @llvm.stackprotector(ptr, ptr) #2
 
   attributes #0 = { nounwind readnone speculatable }
   attributes #1 = { argmemonly nounwind }

diff  --git a/llvm/test/CodeGen/MIR/X86/duplicate-memory-operand-flag.mir b/llvm/test/CodeGen/MIR/X86/duplicate-memory-operand-flag.mir
index 6f6d8377b8517..6b6adf3d08dc1 100644
--- a/llvm/test/CodeGen/MIR/X86/duplicate-memory-operand-flag.mir
+++ b/llvm/test/CodeGen/MIR/X86/duplicate-memory-operand-flag.mir
@@ -2,11 +2,11 @@
 
 --- |
 
-  define i32 @volatile_inc(i32* %x) {
+  define i32 @volatile_inc(ptr %x) {
   entry:
-    %0 = load volatile i32, i32* %x
+    %0 = load volatile i32, ptr %x
     %1 = add i32 %0, 1
-    store volatile i32 %1, i32* %x
+    store volatile i32 %1, ptr %x
     ret i32 %1
   }
 

diff  --git a/llvm/test/CodeGen/MIR/X86/expected-align-in-memory-operand.mir b/llvm/test/CodeGen/MIR/X86/expected-align-in-memory-operand.mir
index 94c06fea3419b..b1d3745015122 100644
--- a/llvm/test/CodeGen/MIR/X86/expected-align-in-memory-operand.mir
+++ b/llvm/test/CodeGen/MIR/X86/expected-align-in-memory-operand.mir
@@ -2,11 +2,11 @@
 
 --- |
 
-  define void @memory_alignment(<8 x float>* %vec) {
+  define void @memory_alignment(ptr %vec) {
   entry:
-    %v = load <8 x float>, <8 x float>* %vec
+    %v = load <8 x float>, ptr %vec
     %v2 = insertelement <8 x float> %v, float 0.0, i32 4
-    store <8 x float> %v2, <8 x float>* %vec
+    store <8 x float> %v2, ptr %vec
     ret void
   }
 

diff  --git a/llvm/test/CodeGen/MIR/X86/expected-alignment-after-align-in-memory-operand.mir b/llvm/test/CodeGen/MIR/X86/expected-alignment-after-align-in-memory-operand.mir
index bc7eb83cbc2d0..1b31b5ee44110 100644
--- a/llvm/test/CodeGen/MIR/X86/expected-alignment-after-align-in-memory-operand.mir
+++ b/llvm/test/CodeGen/MIR/X86/expected-alignment-after-align-in-memory-operand.mir
@@ -2,11 +2,11 @@
 
 --- |
 
-  define void @memory_alignment(<8 x float>* %vec) {
+  define void @memory_alignment(ptr %vec) {
   entry:
-    %v = load <8 x float>, <8 x float>* %vec
+    %v = load <8 x float>, ptr %vec
     %v2 = insertelement <8 x float> %v, float 0.0, i32 4
-    store <8 x float> %v2, <8 x float>* %vec
+    store <8 x float> %v2, ptr %vec
     ret void
   }
 

diff  --git a/llvm/test/CodeGen/MIR/X86/expected-block-reference-in-blockaddress.mir b/llvm/test/CodeGen/MIR/X86/expected-block-reference-in-blockaddress.mir
index 6fbe38df85447..2a47393f8d64f 100644
--- a/llvm/test/CodeGen/MIR/X86/expected-block-reference-in-blockaddress.mir
+++ b/llvm/test/CodeGen/MIR/X86/expected-block-reference-in-blockaddress.mir
@@ -3,13 +3,13 @@
 
 --- |
 
-  @addr = global i8* null
+  @addr = global ptr null
 
   define void @test() {
   entry:
-    store volatile i8* blockaddress(@test, %block), i8** @addr
-    %val = load volatile i8*, i8** @addr
-    indirectbr i8* %val, [label %block]
+    store volatile ptr blockaddress(@test, %block), ptr @addr
+    %val = load volatile ptr, ptr @addr
+    indirectbr ptr %val, [label %block]
 
   block:
     ret void

diff  --git a/llvm/test/CodeGen/MIR/X86/expected-comma-after-memory-operand.mir b/llvm/test/CodeGen/MIR/X86/expected-comma-after-memory-operand.mir
index eba584e129eb3..a21ed0e6188c9 100644
--- a/llvm/test/CodeGen/MIR/X86/expected-comma-after-memory-operand.mir
+++ b/llvm/test/CodeGen/MIR/X86/expected-comma-after-memory-operand.mir
@@ -2,11 +2,11 @@
 
 --- |
 
-  define void @test(i32* %a) {
+  define void @test(ptr %a) {
   entry2:
-    %b = load i32, i32* %a
+    %b = load i32, ptr %a
     %c = add i32 %b, 1
-    store i32 %c, i32* %a
+    store i32 %c, ptr %a
     ret void
   }
 

diff  --git a/llvm/test/CodeGen/MIR/X86/expected-different-implicit-operand.mir b/llvm/test/CodeGen/MIR/X86/expected-different-implicit-operand.mir
index 98a6173937028..5e81481eda9a8 100644
--- a/llvm/test/CodeGen/MIR/X86/expected-different-implicit-operand.mir
+++ b/llvm/test/CodeGen/MIR/X86/expected-different-implicit-operand.mir
@@ -2,9 +2,9 @@
 
 --- |
 
-  define i32 @foo(i32* %p) {
+  define i32 @foo(ptr %p) {
   entry:
-    %a = load i32, i32* %p
+    %a = load i32, ptr %p
     %0 = icmp sle i32 %a, 10
     br i1 %0, label %less, label %exit
 

diff  --git a/llvm/test/CodeGen/MIR/X86/expected-different-implicit-register-flag.mir b/llvm/test/CodeGen/MIR/X86/expected-different-implicit-register-flag.mir
index 5bfff0a776a03..ae36a3e54ae33 100644
--- a/llvm/test/CodeGen/MIR/X86/expected-different-implicit-register-flag.mir
+++ b/llvm/test/CodeGen/MIR/X86/expected-different-implicit-register-flag.mir
@@ -2,9 +2,9 @@
 
 --- |
 
-  define i32 @foo(i32* %p) {
+  define i32 @foo(ptr %p) {
   entry:
-    %a = load i32, i32* %p
+    %a = load i32, ptr %p
     %0 = icmp sle i32 %a, 10
     br i1 %0, label %less, label %exit
 

diff  --git a/llvm/test/CodeGen/MIR/X86/expected-function-reference-after-blockaddress.mir b/llvm/test/CodeGen/MIR/X86/expected-function-reference-after-blockaddress.mir
index aabcf58828ea7..27f44fdd3cda1 100644
--- a/llvm/test/CodeGen/MIR/X86/expected-function-reference-after-blockaddress.mir
+++ b/llvm/test/CodeGen/MIR/X86/expected-function-reference-after-blockaddress.mir
@@ -2,13 +2,13 @@
 
 --- |
 
-  @addr = global i8* null
+  @addr = global ptr null
 
   define void @test() {
   entry:
-    store volatile i8* blockaddress(@test, %block), i8** @addr
-    %val = load volatile i8*, i8** @addr
-    indirectbr i8* %val, [label %block]
+    store volatile ptr blockaddress(@test, %block), ptr @addr
+    %val = load volatile ptr, ptr @addr
+    indirectbr ptr %val, [label %block]
 
   block:
     ret void

diff  --git a/llvm/test/CodeGen/MIR/X86/expected-global-value-after-blockaddress.mir b/llvm/test/CodeGen/MIR/X86/expected-global-value-after-blockaddress.mir
index 531ba229b01a4..cb75cdd3cfbcd 100644
--- a/llvm/test/CodeGen/MIR/X86/expected-global-value-after-blockaddress.mir
+++ b/llvm/test/CodeGen/MIR/X86/expected-global-value-after-blockaddress.mir
@@ -2,13 +2,13 @@
 
 --- |
 
-  @addr = global i8* null
+  @addr = global ptr null
 
   define void @test() {
   entry:
-    store volatile i8* blockaddress(@test, %block), i8** @addr
-    %val = load volatile i8*, i8** @addr
-    indirectbr i8* %val, [label %block]
+    store volatile ptr blockaddress(@test, %block), ptr @addr
+    %val = load volatile ptr, ptr @addr
+    indirectbr ptr %val, [label %block]
 
   block:
     ret void

diff  --git a/llvm/test/CodeGen/MIR/X86/expected-integer-after-offset-sign.mir b/llvm/test/CodeGen/MIR/X86/expected-integer-after-offset-sign.mir
index 6870b860c191d..9f1a15b500b6b 100644
--- a/llvm/test/CodeGen/MIR/X86/expected-integer-after-offset-sign.mir
+++ b/llvm/test/CodeGen/MIR/X86/expected-integer-after-offset-sign.mir
@@ -6,7 +6,7 @@
 
   define i32 @inc() {
   entry:
-    %a = load i32, i32* @G
+    %a = load i32, ptr @G
     %b = add i32 %a, 1
     ret i32 %b
   }

diff  --git a/llvm/test/CodeGen/MIR/X86/expected-load-or-store-in-memory-operand.mir b/llvm/test/CodeGen/MIR/X86/expected-load-or-store-in-memory-operand.mir
index 4d262ef398e38..cb7fb2586e41a 100644
--- a/llvm/test/CodeGen/MIR/X86/expected-load-or-store-in-memory-operand.mir
+++ b/llvm/test/CodeGen/MIR/X86/expected-load-or-store-in-memory-operand.mir
@@ -2,9 +2,9 @@
 
 --- |
 
-  define i32 @test(i32* %a) {
+  define i32 @test(ptr %a) {
   entry:
-    %b = load i32, i32* %a
+    %b = load i32, ptr %a
     ret i32 %b
   }
 

diff  --git a/llvm/test/CodeGen/MIR/X86/expected-metadata-node-after-debug-location.mir b/llvm/test/CodeGen/MIR/X86/expected-metadata-node-after-debug-location.mir
index 9478786d55678..f79b93c677749 100644
--- a/llvm/test/CodeGen/MIR/X86/expected-metadata-node-after-debug-location.mir
+++ b/llvm/test/CodeGen/MIR/X86/expected-metadata-node-after-debug-location.mir
@@ -5,9 +5,9 @@
   define i32 @test(i32 %x) #0 !dbg !4 {
   entry:
     %x.addr = alloca i32, align 4
-    store i32 %x, i32* %x.addr, align 4
-    call void @llvm.dbg.declare(metadata i32* %x.addr, metadata !12, metadata !13), !dbg !14
-    %0 = load i32, i32* %x.addr, align 4, !dbg !15
+    store i32 %x, ptr %x.addr, align 4
+    call void @llvm.dbg.declare(metadata ptr %x.addr, metadata !12, metadata !13), !dbg !14
+    %0 = load i32, ptr %x.addr, align 4, !dbg !15
     ret i32 %0, !dbg !15
   }
 

diff  --git a/llvm/test/CodeGen/MIR/X86/expected-metadata-node-after-exclaim.mir b/llvm/test/CodeGen/MIR/X86/expected-metadata-node-after-exclaim.mir
index d04ef11cbfb84..4befc53947601 100644
--- a/llvm/test/CodeGen/MIR/X86/expected-metadata-node-after-exclaim.mir
+++ b/llvm/test/CodeGen/MIR/X86/expected-metadata-node-after-exclaim.mir
@@ -5,9 +5,9 @@
   define i32 @test(i32 %x) #0 !dbg !4 {
   entry:
     %x.addr = alloca i32, align 4
-    store i32 %x, i32* %x.addr, align 4
-    call void @llvm.dbg.declare(metadata i32* %x.addr, metadata !12, metadata !13), !dbg !14
-    %0 = load i32, i32* %x.addr, align 4, !dbg !15
+    store i32 %x, ptr %x.addr, align 4
+    call void @llvm.dbg.declare(metadata ptr %x.addr, metadata !12, metadata !13), !dbg !14
+    %0 = load i32, ptr %x.addr, align 4, !dbg !15
     ret i32 %0, !dbg !15
   }
 

diff  --git a/llvm/test/CodeGen/MIR/X86/expected-metadata-node-in-stack-object.mir b/llvm/test/CodeGen/MIR/X86/expected-metadata-node-in-stack-object.mir
index fe5785038beac..4ef479d13b0ab 100644
--- a/llvm/test/CodeGen/MIR/X86/expected-metadata-node-in-stack-object.mir
+++ b/llvm/test/CodeGen/MIR/X86/expected-metadata-node-in-stack-object.mir
@@ -3,8 +3,8 @@
   define i32 @test(i32 %x) {
   entry:
     %xa = alloca i32, align 4
-    store i32 %x, i32* %xa, align 4
-    %0 = load i32, i32* %xa, align 4
+    store i32 %x, ptr %xa, align 4
+    %0 = load i32, ptr %xa, align 4
     ret i32 %0
   }
 ...

diff  --git a/llvm/test/CodeGen/MIR/X86/expected-named-register-in-callee-saved-register.mir b/llvm/test/CodeGen/MIR/X86/expected-named-register-in-callee-saved-register.mir
index 05836e7bab2eb..30f80f9ed719f 100644
--- a/llvm/test/CodeGen/MIR/X86/expected-named-register-in-callee-saved-register.mir
+++ b/llvm/test/CodeGen/MIR/X86/expected-named-register-in-callee-saved-register.mir
@@ -10,7 +10,7 @@
   define i32 @func(i32 %a) {
   entry:
     %b = alloca i32
-    store i32 %a, i32* %b
+    store i32 %a, ptr %b
     br label %check
 
   check:
@@ -18,10 +18,10 @@
     br i1 %comp, label %loop, label %exit
 
   loop:
-    %c = load i32, i32* %b
+    %c = load i32, ptr %b
     %d = call i32 @compute(i32 %c)
     %e = sub i32 %d, 1
-    store i32 %e, i32* %b
+    store i32 %e, ptr %b
     br label %check
 
   exit:

diff  --git a/llvm/test/CodeGen/MIR/X86/expected-number-after-bb.mir b/llvm/test/CodeGen/MIR/X86/expected-number-after-bb.mir
index e9a1a9e4ee875..b0df32477833f 100644
--- a/llvm/test/CodeGen/MIR/X86/expected-number-after-bb.mir
+++ b/llvm/test/CodeGen/MIR/X86/expected-number-after-bb.mir
@@ -2,9 +2,9 @@
 
 --- |
 
-  define i32 @foo(i32* %p) {
+  define i32 @foo(ptr %p) {
   entry:
-    %a = load i32, i32* %p
+    %a = load i32, ptr %p
     %b = icmp sle i32 %a, 10
     br i1 %b, label %yes, label %nah
 

diff  --git a/llvm/test/CodeGen/MIR/X86/expected-pointer-value-in-memory-operand.mir b/llvm/test/CodeGen/MIR/X86/expected-pointer-value-in-memory-operand.mir
index d94dc3ffb7e0c..21970038dd041 100644
--- a/llvm/test/CodeGen/MIR/X86/expected-pointer-value-in-memory-operand.mir
+++ b/llvm/test/CodeGen/MIR/X86/expected-pointer-value-in-memory-operand.mir
@@ -2,9 +2,9 @@
 
 --- |
 
-  define i32 @test(i32* %a) {
+  define i32 @test(ptr %a) {
   entry:
-    %b = load i32, i32* %a
+    %b = load i32, ptr %a
     ret i32 %b
   }
 

diff  --git a/llvm/test/CodeGen/MIR/X86/expected-positive-alignment-after-align.mir b/llvm/test/CodeGen/MIR/X86/expected-positive-alignment-after-align.mir
index 1f033c5d7d52b..d2beda69696e4 100644
--- a/llvm/test/CodeGen/MIR/X86/expected-positive-alignment-after-align.mir
+++ b/llvm/test/CodeGen/MIR/X86/expected-positive-alignment-after-align.mir
@@ -2,11 +2,11 @@
 
 --- |
 
-  define void @memory_alignment(<8 x float>* %vec) {
+  define void @memory_alignment(ptr %vec) {
   entry:
-    %v = load <8 x float>, <8 x float>* %vec
+    %v = load <8 x float>, ptr %vec
     %v2 = insertelement <8 x float> %v, float 0.0, i32 4
-    store <8 x float> %v2, <8 x float>* %vec
+    store <8 x float> %v2, ptr %vec
     ret void
   }
 

diff  --git a/llvm/test/CodeGen/MIR/X86/expected-size-integer-after-memory-operation.mir b/llvm/test/CodeGen/MIR/X86/expected-size-integer-after-memory-operation.mir
index ff9dfba5cd1cc..98073d91843fd 100644
--- a/llvm/test/CodeGen/MIR/X86/expected-size-integer-after-memory-operation.mir
+++ b/llvm/test/CodeGen/MIR/X86/expected-size-integer-after-memory-operation.mir
@@ -2,9 +2,9 @@
 
 --- |
 
-  define i32 @test(i32* %a) {
+  define i32 @test(ptr %a) {
   entry:
-    %b = load i32, i32* %a
+    %b = load i32, ptr %a
     ret i32 %b
   }
 

diff  --git a/llvm/test/CodeGen/MIR/X86/expected-size-integer-after-memory-operation2.mir b/llvm/test/CodeGen/MIR/X86/expected-size-integer-after-memory-operation2.mir
index 71edc242f1662..8b56239dc0fb9 100644
--- a/llvm/test/CodeGen/MIR/X86/expected-size-integer-after-memory-operation2.mir
+++ b/llvm/test/CodeGen/MIR/X86/expected-size-integer-after-memory-operation2.mir
@@ -2,9 +2,9 @@
 
 --- |
 
-  define i32 @test(i32* %a) {
+  define i32 @test(ptr %a) {
   entry:
-    %b = load i32, i32* %a
+    %b = load i32, ptr %a
     ret i32 %b
   }
 

diff  --git a/llvm/test/CodeGen/MIR/X86/expected-stack-object.mir b/llvm/test/CodeGen/MIR/X86/expected-stack-object.mir
index c9f6c9902b492..e819e3fa772dc 100644
--- a/llvm/test/CodeGen/MIR/X86/expected-stack-object.mir
+++ b/llvm/test/CodeGen/MIR/X86/expected-stack-object.mir
@@ -3,27 +3,27 @@
 
 --- |
   @.str = private unnamed_addr constant [4 x i8] c"%s\0A\00", align 1
-  @__stack_chk_guard = external global i8*
+  @__stack_chk_guard = external global ptr
 
   define i32 @test() #0 {
   entry:
-    %StackGuardSlot = alloca i8*
-    %StackGuard = load i8*, i8** @__stack_chk_guard
-    call void @llvm.stackprotector(i8* %StackGuard, i8** %StackGuardSlot)
-    %test = alloca i8*, align 8
+    %StackGuardSlot = alloca ptr
+    %StackGuard = load ptr, ptr @__stack_chk_guard
+    call void @llvm.stackprotector(ptr %StackGuard, ptr %StackGuardSlot)
+    %test = alloca ptr, align 8
     %a = alloca i8, i64 5
-    store i8* %a, i8** %test, align 8
-    %b = load i8*, i8** %test, align 8
-    %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i8* %b)
-    call void @llvm.stackprotectorcheck(i8** @__stack_chk_guard)
+    store ptr %a, ptr %test, align 8
+    %b = load ptr, ptr %test, align 8
+    %call = call i32 (ptr, ...) @printf(ptr @.str, ptr %b)
+    call void @llvm.stackprotectorcheck(ptr @__stack_chk_guard)
     ret i32 %call
   }
 
-  declare i32 @printf(i8*, ...)
+  declare i32 @printf(ptr, ...)
 
-  declare void @llvm.stackprotector(i8*, i8**) #1
+  declare void @llvm.stackprotector(ptr, ptr) #1
 
-  declare void @llvm.stackprotectorcheck(i8**) #2
+  declare void @llvm.stackprotectorcheck(ptr) #2
 
   attributes #0 = { ssp "stack-protector-buffer-size"="5" }
   attributes #1 = { nounwind }

diff  --git a/llvm/test/CodeGen/MIR/X86/expected-target-flag-name.mir b/llvm/test/CodeGen/MIR/X86/expected-target-flag-name.mir
index b2fe4c009d5cd..3f08e46f8971b 100644
--- a/llvm/test/CodeGen/MIR/X86/expected-target-flag-name.mir
+++ b/llvm/test/CodeGen/MIR/X86/expected-target-flag-name.mir
@@ -6,7 +6,7 @@
 
   define i32 @inc() {
   entry:
-    %a = load i32, i32* @G
+    %a = load i32, ptr @G
     %b = add i32 %a, 1
     ret i32 %b
   }

diff  --git a/llvm/test/CodeGen/MIR/X86/expected-value-in-memory-operand.mir b/llvm/test/CodeGen/MIR/X86/expected-value-in-memory-operand.mir
index 5d1c750227930..87995babe09c2 100644
--- a/llvm/test/CodeGen/MIR/X86/expected-value-in-memory-operand.mir
+++ b/llvm/test/CodeGen/MIR/X86/expected-value-in-memory-operand.mir
@@ -2,9 +2,9 @@
 
 --- |
 
-  define i32 @test(i32* %a) {
+  define i32 @test(ptr %a) {
   entry:
-    %b = load i32, i32* %a
+    %b = load i32, ptr %a
     ret i32 %b
   }
 

diff  --git a/llvm/test/CodeGen/MIR/X86/external-symbol-operands.mir b/llvm/test/CodeGen/MIR/X86/external-symbol-operands.mir
index ff5c8d4dbc3fa..785662e59a499 100644
--- a/llvm/test/CodeGen/MIR/X86/external-symbol-operands.mir
+++ b/llvm/test/CodeGen/MIR/X86/external-symbol-operands.mir
@@ -3,24 +3,24 @@
 # operands correctly.
 
 --- |
-  @__stack_chk_guard = external global i8*
+  @__stack_chk_guard = external global ptr
 
   define i32 @test(i32 %n) #0 {
   entry:
-    %StackGuardSlot = alloca i8*
-    %StackGuard = load i8*, i8** @__stack_chk_guard
-    call void @llvm.stackprotector(i8* %StackGuard, i8** %StackGuardSlot)
+    %StackGuardSlot = alloca ptr
+    %StackGuard = load ptr, ptr @__stack_chk_guard
+    call void @llvm.stackprotector(ptr %StackGuard, ptr %StackGuardSlot)
     %a = alloca [128 x i32], align 16
     %idxprom = sext i32 %n to i64
-    %arrayidx = getelementptr inbounds [128 x i32], [128 x i32]* %a, i64 0, i64 %idxprom
-    %0 = load i32, i32* %arrayidx, align 4
-    call void @llvm.stackprotectorcheck(i8** @__stack_chk_guard)
+    %arrayidx = getelementptr inbounds [128 x i32], ptr %a, i64 0, i64 %idxprom
+    %0 = load i32, ptr %arrayidx, align 4
+    call void @llvm.stackprotectorcheck(ptr @__stack_chk_guard)
     ret i32 %0
   }
 
-  declare void @llvm.stackprotector(i8*, i8**) #1
+  declare void @llvm.stackprotector(ptr, ptr) #1
 
-  declare void @llvm.stackprotectorcheck(i8**) #1
+  declare void @llvm.stackprotectorcheck(ptr) #1
 
   attributes #0 = { ssp "stack-protector-buffer-size"="8" }
   attributes #1 = { nounwind }

diff  --git a/llvm/test/CodeGen/MIR/X86/fixed-stack-di.mir b/llvm/test/CodeGen/MIR/X86/fixed-stack-di.mir
index 5bdece87aa83f..fca249320bc82 100644
--- a/llvm/test/CodeGen/MIR/X86/fixed-stack-di.mir
+++ b/llvm/test/CodeGen/MIR/X86/fixed-stack-di.mir
@@ -8,9 +8,9 @@
 
   declare void @llvm.dbg.declare(metadata, metadata, metadata) #0
 
-  define hidden void @foo(i32* byval(i32) %dstRect) {
+  define hidden void @foo(ptr byval(i32) %dstRect) {
   entry:
-    call void @llvm.dbg.declare(metadata i32* %dstRect, metadata !3, metadata !DIExpression()), !dbg !5
+    call void @llvm.dbg.declare(metadata ptr %dstRect, metadata !3, metadata !DIExpression()), !dbg !5
     unreachable
   }
 

diff  --git a/llvm/test/CodeGen/MIR/X86/fixed-stack-memory-operands.mir b/llvm/test/CodeGen/MIR/X86/fixed-stack-memory-operands.mir
index fa99421460dc6..d59085128c0ad 100644
--- a/llvm/test/CodeGen/MIR/X86/fixed-stack-memory-operands.mir
+++ b/llvm/test/CodeGen/MIR/X86/fixed-stack-memory-operands.mir
@@ -7,8 +7,8 @@
   define i32 @test(i32 %a) #0 {
   entry:
     %b = alloca i32
-    store i32 %a, i32* %b
-    %c = load i32, i32* %b
+    store i32 %a, ptr %b
+    %c = load i32, ptr %b
     ret i32 %c
   }
 

diff  --git a/llvm/test/CodeGen/MIR/X86/fixed-stack-objects.mir b/llvm/test/CodeGen/MIR/X86/fixed-stack-objects.mir
index 9eba3eaf4bade..7cba6e3d03c28 100644
--- a/llvm/test/CodeGen/MIR/X86/fixed-stack-objects.mir
+++ b/llvm/test/CodeGen/MIR/X86/fixed-stack-objects.mir
@@ -6,8 +6,8 @@
   define i32 @test(i32 %a) #0 {
   entry:
     %b = alloca i32
-    store i32 %a, i32* %b
-    %c = load i32, i32* %b
+    store i32 %a, ptr %b
+    %c = load i32, ptr %b
     ret i32 %c
   }
 

diff  --git a/llvm/test/CodeGen/MIR/X86/frame-info-save-restore-points.mir b/llvm/test/CodeGen/MIR/X86/frame-info-save-restore-points.mir
index cb804b3cbfabb..0c68a3a3ed1a7 100644
--- a/llvm/test/CodeGen/MIR/X86/frame-info-save-restore-points.mir
+++ b/llvm/test/CodeGen/MIR/X86/frame-info-save-restore-points.mir
@@ -11,8 +11,8 @@
     br i1 %tmp2, label %true, label %false
 
   true:
-    store i32 %a, i32* %tmp, align 4
-    %tmp4 = call i32 @doSomething(i32 0, i32* %tmp)
+    store i32 %a, ptr %tmp, align 4
+    %tmp4 = call i32 @doSomething(i32 0, ptr %tmp)
     br label %false
 
   false:
@@ -20,7 +20,7 @@
     ret i32 %tmp.0
   }
 
-  declare i32 @doSomething(i32, i32*)
+  declare i32 @doSomething(i32, ptr)
 
 ...
 ---

diff  --git a/llvm/test/CodeGen/MIR/X86/frame-info-stack-references.mir b/llvm/test/CodeGen/MIR/X86/frame-info-stack-references.mir
index 8c4583ade6328..2aa4692872915 100644
--- a/llvm/test/CodeGen/MIR/X86/frame-info-stack-references.mir
+++ b/llvm/test/CodeGen/MIR/X86/frame-info-stack-references.mir
@@ -4,27 +4,27 @@
 
 --- |
   @.str = private unnamed_addr constant [4 x i8] c"%s\0A\00", align 1
-  @__stack_chk_guard = external global i8*
+  @__stack_chk_guard = external global ptr
 
   define i32 @test() #0 {
   entry:
-    %StackGuardSlot = alloca i8*
-    %StackGuard = load i8*, i8** @__stack_chk_guard
-    call void @llvm.stackprotector(i8* %StackGuard, i8** %StackGuardSlot)
-    %test = alloca i8*, align 8
+    %StackGuardSlot = alloca ptr
+    %StackGuard = load ptr, ptr @__stack_chk_guard
+    call void @llvm.stackprotector(ptr %StackGuard, ptr %StackGuardSlot)
+    %test = alloca ptr, align 8
     %a = alloca i8, i64 5
-    store i8* %a, i8** %test, align 8
-    %b = load i8*, i8** %test, align 8
-    %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i8* %b)
-    call void @llvm.stackprotectorcheck(i8** @__stack_chk_guard)
+    store ptr %a, ptr %test, align 8
+    %b = load ptr, ptr %test, align 8
+    %call = call i32 (ptr, ...) @printf(ptr @.str, ptr %b)
+    call void @llvm.stackprotectorcheck(ptr @__stack_chk_guard)
     ret i32 %call
   }
 
-  declare i32 @printf(i8*, ...)
+  declare i32 @printf(ptr, ...)
 
-  declare void @llvm.stackprotector(i8*, i8**) #1
+  declare void @llvm.stackprotector(ptr, ptr) #1
 
-  declare void @llvm.stackprotectorcheck(i8**) #2
+  declare void @llvm.stackprotectorcheck(ptr) #2
 
   attributes #0 = { ssp "stack-protector-buffer-size"="5" }
   attributes #1 = { nounwind }

diff  --git a/llvm/test/CodeGen/MIR/X86/global-value-operands.mir b/llvm/test/CodeGen/MIR/X86/global-value-operands.mir
index 526f939d0ce5b..d981abf8ba5b9 100644
--- a/llvm/test/CodeGen/MIR/X86/global-value-operands.mir
+++ b/llvm/test/CodeGen/MIR/X86/global-value-operands.mir
@@ -8,14 +8,14 @@
 
   define i32 @inc() {
   entry:
-    %a = load i32, i32* @G
+    %a = load i32, ptr @G
     %b = add i32 %a, 1
     ret i32 %b
   }
 
   define i32 @inc2() {
   entry:
-    %a = load i32, i32* @0
+    %a = load i32, ptr @0
     %b = add i32 %a, 1
     ret i32 %b
   }
@@ -27,10 +27,10 @@
 
   define i32 @test() {
   entry:
-    %a = load i32, i32* @.$0
-    store i32 %a, i32* @-_-
-    %b = load i32, i32* @_-_a
-    store i32 %b, i32* @$.-B
+    %a = load i32, ptr @.$0
+    store i32 %a, ptr @-_-
+    %b = load i32, ptr @_-_a
+    store i32 %b, ptr @$.-B
     ret i32 %b
   }
 
@@ -38,22 +38,22 @@
 
   define i32 @test2() {
   entry:
-    %a = load i32, i32* @"\01Hello@$%09 \\ World,"
+    %a = load i32, ptr @"\01Hello@$%09 \\ World,"
     ret i32 %a
   }
 
   define i32 @test3() {
   entry:
-    %a = load i32, i32* @.$0
-    store i32 %a, i32* @-_-
-    %b = load i32, i32* @_-_a
-    store i32 %b, i32* @$.-B
+    %a = load i32, ptr @.$0
+    store i32 %a, ptr @-_-
+    %b = load i32, ptr @_-_a
+    store i32 %b, ptr @$.-B
     ret i32 %b
   }
 
   define i32 @tf() {
   entry:
-    %a = load i32, i32* @G
+    %a = load i32, ptr @G
     %b = add i32 %a, 1
     ret i32 %b
   }

diff  --git a/llvm/test/CodeGen/MIR/X86/instr-heap-alloc-operands.mir b/llvm/test/CodeGen/MIR/X86/instr-heap-alloc-operands.mir
index e424bffe6db94..e1edb19c9a74b 100644
--- a/llvm/test/CodeGen/MIR/X86/instr-heap-alloc-operands.mir
+++ b/llvm/test/CodeGen/MIR/X86/instr-heap-alloc-operands.mir
@@ -2,11 +2,11 @@
 # This test ensures that the MIR parser parses heap alloc markers correctly.
 
 --- |
-  declare i8* @f(i32) nounwind
+  declare ptr @f(i32) nounwind
 
   define i32 @test(i32 %x) nounwind {
   entry:
-    call i8* @f(i32 %x), !heapallocsite !2
+    call ptr @f(i32 %x), !heapallocsite !2
     ret i32 undef
   }
 

diff  --git a/llvm/test/CodeGen/MIR/X86/instr-pcsections.mir b/llvm/test/CodeGen/MIR/X86/instr-pcsections.mir
index 1ee25c000ceb6..ad1b8efe7f86c 100644
--- a/llvm/test/CodeGen/MIR/X86/instr-pcsections.mir
+++ b/llvm/test/CodeGen/MIR/X86/instr-pcsections.mir
@@ -3,9 +3,9 @@
 
 --- |
 
-  define i8 @test(i8* %a) {
+  define i8 @test(ptr %a) {
   entry:
-    %0 = load i8, i8* %a, align 1, !pcsections !0
+    %0 = load i8, ptr %a, align 1, !pcsections !0
     ret i8 %0
   }
 

diff  --git a/llvm/test/CodeGen/MIR/X86/instructions-debug-location.mir b/llvm/test/CodeGen/MIR/X86/instructions-debug-location.mir
index 9abe47991f322..78403629c7b05 100644
--- a/llvm/test/CodeGen/MIR/X86/instructions-debug-location.mir
+++ b/llvm/test/CodeGen/MIR/X86/instructions-debug-location.mir
@@ -7,29 +7,29 @@
   define i32 @test(i32 %x) #0 !dbg !4 {
   entry:
     %x.addr = alloca i32, align 4
-    store i32 %x, i32* %x.addr, align 4
-    call void @llvm.dbg.declare(metadata i32* %x.addr, metadata !12, metadata !DIExpression()), !dbg !13
-    %0 = load i32, i32* %x.addr, align 4, !dbg !14
+    store i32 %x, ptr %x.addr, align 4
+    call void @llvm.dbg.declare(metadata ptr %x.addr, metadata !12, metadata !DIExpression()), !dbg !13
+    %0 = load i32, ptr %x.addr, align 4, !dbg !14
     ret i32 %0, !dbg !14
   }
 
   define i32 @test_typed_immediates(i32 %x) #0 {
   entry:
     %x.addr = alloca i32, align 4
-    store i32 %x, i32* %x.addr, align 4
-    call void @llvm.dbg.declare(metadata i32* %x.addr, metadata !12, metadata !DIExpression()), !dbg !13
-    %0 = load i32, i32* %x.addr, align 4, !dbg !14
+    store i32 %x, ptr %x.addr, align 4
+    call void @llvm.dbg.declare(metadata ptr %x.addr, metadata !12, metadata !DIExpression()), !dbg !13
+    %0 = load i32, ptr %x.addr, align 4, !dbg !14
     ret i32 %0, !dbg !14
   }
 
   define i32 @test_mir_created(i32 %x) #0 !dbg !15 {
   entry:
     %x.addr = alloca i32, align 4
-    store i32 %x, i32* %x.addr, align 4
-    %0 = load i32, i32* %x.addr, align 4
-    %1 = load i32, i32* %x.addr, align 4
-    %2 = load i32, i32* %x.addr, align 4
-    %3 = load i32, i32* %x.addr, align 4
+    store i32 %x, ptr %x.addr, align 4
+    %0 = load i32, ptr %x.addr, align 4
+    %1 = load i32, ptr %x.addr, align 4
+    %2 = load i32, ptr %x.addr, align 4
+    %3 = load i32, ptr %x.addr, align 4
     ret i32 %0, !dbg !16
   }
 

diff  --git a/llvm/test/CodeGen/MIR/X86/invalid-metadata-node-type.mir b/llvm/test/CodeGen/MIR/X86/invalid-metadata-node-type.mir
index 30a8c6b99e6ce..63f394356f201 100644
--- a/llvm/test/CodeGen/MIR/X86/invalid-metadata-node-type.mir
+++ b/llvm/test/CodeGen/MIR/X86/invalid-metadata-node-type.mir
@@ -6,12 +6,12 @@
   entry:
     %x.i = alloca i8, align 1
     %y.i = alloca [256 x i8], align 16
-    %0 = bitcast i8* %x.i to i8*
+    %0 = bitcast ptr %x.i to ptr
     br label %for.body
 
   for.body:
-    %1 = bitcast [256 x i8]* %y.i to i8*
-    call void @llvm.dbg.declare(metadata i8* %0, metadata !4, metadata !7) #3, !dbg !8
+    %1 = bitcast ptr %y.i to ptr
+    call void @llvm.dbg.declare(metadata ptr %0, metadata !4, metadata !7) #3, !dbg !8
     br label %for.body
   }
 

diff  --git a/llvm/test/CodeGen/MIR/X86/invalid-target-flag-name.mir b/llvm/test/CodeGen/MIR/X86/invalid-target-flag-name.mir
index a3d4a3857434b..c30d58a642c02 100644
--- a/llvm/test/CodeGen/MIR/X86/invalid-target-flag-name.mir
+++ b/llvm/test/CodeGen/MIR/X86/invalid-target-flag-name.mir
@@ -6,7 +6,7 @@
 
   define i32 @inc() {
   entry:
-    %a = load i32, i32* @G
+    %a = load i32, ptr @G
     %b = add i32 %a, 1
     ret i32 %b
   }

diff  --git a/llvm/test/CodeGen/MIR/X86/large-index-number-error.mir b/llvm/test/CodeGen/MIR/X86/large-index-number-error.mir
index ba68680089793..4fdb17f8a1719 100644
--- a/llvm/test/CodeGen/MIR/X86/large-index-number-error.mir
+++ b/llvm/test/CodeGen/MIR/X86/large-index-number-error.mir
@@ -2,9 +2,9 @@
 
 --- |
 
-  define i32 @foo(i32* %p) {
+  define i32 @foo(ptr %p) {
   entry:
-    %a = load i32, i32* %p
+    %a = load i32, ptr %p
     %b = icmp sle i32 %a, 10
     br i1 %b, label %0, label %1
 

diff  --git a/llvm/test/CodeGen/MIR/X86/large-offset-number-error.mir b/llvm/test/CodeGen/MIR/X86/large-offset-number-error.mir
index 2063f4ca98cb8..a14cf33889808 100644
--- a/llvm/test/CodeGen/MIR/X86/large-offset-number-error.mir
+++ b/llvm/test/CodeGen/MIR/X86/large-offset-number-error.mir
@@ -6,7 +6,7 @@
 
   define i32 @inc() {
   entry:
-    %a = load i32, i32* @G
+    %a = load i32, ptr @G
     %b = add i32 %a, 1
     ret i32 %b
   }

diff  --git a/llvm/test/CodeGen/MIR/X86/large-size-in-memory-operand-error.mir b/llvm/test/CodeGen/MIR/X86/large-size-in-memory-operand-error.mir
index b0fae40f7fdf1..51438ac53f087 100644
--- a/llvm/test/CodeGen/MIR/X86/large-size-in-memory-operand-error.mir
+++ b/llvm/test/CodeGen/MIR/X86/large-size-in-memory-operand-error.mir
@@ -2,9 +2,9 @@
 
 --- |
 
-  define i32 @test(i32* %a) {
+  define i32 @test(ptr %a) {
   entry:
-    %b = load i32, i32* %a
+    %b = load i32, ptr %a
     ret i32 %b
   }
 

diff  --git a/llvm/test/CodeGen/MIR/X86/machine-basic-block-operands.mir b/llvm/test/CodeGen/MIR/X86/machine-basic-block-operands.mir
index e4e3a52d23117..72e36f5e5716d 100644
--- a/llvm/test/CodeGen/MIR/X86/machine-basic-block-operands.mir
+++ b/llvm/test/CodeGen/MIR/X86/machine-basic-block-operands.mir
@@ -3,9 +3,9 @@
 
 --- |
 
-  define i32 @foo(i32* %p) {
+  define i32 @foo(ptr %p) {
   entry:
-    %a = load i32, i32* %p
+    %a = load i32, ptr %p
     %0 = icmp sle i32 %a, 10
     br i1 %0, label %less, label %exit
 
@@ -16,9 +16,9 @@
     ret i32 %a
   }
 
-  define i32 @bar(i32* %p) {
+  define i32 @bar(ptr %p) {
   entry:
-    %a = load i32, i32* %p
+    %a = load i32, ptr %p
     %b = icmp sle i32 %a, 10
     br i1 %b, label %0, label %1
 

diff  --git a/llvm/test/CodeGen/MIR/X86/machine-metadata-error.mir b/llvm/test/CodeGen/MIR/X86/machine-metadata-error.mir
index e3ab673c4d127..352d689c61779 100644
--- a/llvm/test/CodeGen/MIR/X86/machine-metadata-error.mir
+++ b/llvm/test/CodeGen/MIR/X86/machine-metadata-error.mir
@@ -5,7 +5,7 @@
   target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
   target triple = "x86_64-unknown-linux-gnu"
 
-  define i32 @test_memcpy(i32* nocapture %p, i32* nocapture readonly %q) {
+  define i32 @test_memcpy(ptr nocapture %p, ptr nocapture readonly %q) {
     ret i32 0
   }
 ...

diff  --git a/llvm/test/CodeGen/MIR/X86/machine-metadata.mir b/llvm/test/CodeGen/MIR/X86/machine-metadata.mir
index 9e4f6d04a4bc2..47929752f1af8 100644
--- a/llvm/test/CodeGen/MIR/X86/machine-metadata.mir
+++ b/llvm/test/CodeGen/MIR/X86/machine-metadata.mir
@@ -6,49 +6,49 @@
   target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
   target triple = "x86_64-unknown-linux-gnu"
 
-  define i32 @test_memcpy(i32* nocapture %p, i32* nocapture readonly %q) {
-    %p0 = bitcast i32* %p to i8*
-    %add.ptr = getelementptr inbounds i32, i32* %p, i64 4
-    %p1 = bitcast i32* %add.ptr to i8*
-    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 4 dereferenceable(16) %p0, i8* noundef nonnull align 4 dereferenceable(16) %p1, i64 16, i1 false), !alias.scope !0, !noalias !3
-    %v0 = load i32, i32* %q, align 4, !alias.scope !3, !noalias !0
-    %q1 = getelementptr inbounds i32, i32* %q, i64 1
-    %v1 = load i32, i32* %q1, align 4, !alias.scope !3, !noalias !0
+  define i32 @test_memcpy(ptr nocapture %p, ptr nocapture readonly %q) {
+    %p0 = bitcast ptr %p to ptr
+    %add.ptr = getelementptr inbounds i32, ptr %p, i64 4
+    %p1 = bitcast ptr %add.ptr to ptr
+    tail call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 4 dereferenceable(16) %p0, ptr noundef nonnull align 4 dereferenceable(16) %p1, i64 16, i1 false), !alias.scope !0, !noalias !3
+    %v0 = load i32, ptr %q, align 4, !alias.scope !3, !noalias !0
+    %q1 = getelementptr inbounds i32, ptr %q, i64 1
+    %v1 = load i32, ptr %q1, align 4, !alias.scope !3, !noalias !0
     %add = add i32 %v0, %v1
     ret i32 %add
   }
 
-  define i32 @test_memcpy_inline(i32* nocapture %p, i32* nocapture readonly %q) {
-    %p0 = bitcast i32* %p to i8*
-    %add.ptr = getelementptr inbounds i32, i32* %p, i64 4
-    %p1 = bitcast i32* %add.ptr to i8*
-    tail call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* noundef nonnull align 4 dereferenceable(16) %p0, i8* noundef nonnull align 4 dereferenceable(16) %p1, i64 16, i1 false), !alias.scope !0, !noalias !3
-    %v0 = load i32, i32* %q, align 4, !alias.scope !3, !noalias !0
-    %q1 = getelementptr inbounds i32, i32* %q, i64 1
-    %v1 = load i32, i32* %q1, align 4, !alias.scope !3, !noalias !0
+  define i32 @test_memcpy_inline(ptr nocapture %p, ptr nocapture readonly %q) {
+    %p0 = bitcast ptr %p to ptr
+    %add.ptr = getelementptr inbounds i32, ptr %p, i64 4
+    %p1 = bitcast ptr %add.ptr to ptr
+    tail call void @llvm.memcpy.inline.p0.p0.i64(ptr noundef nonnull align 4 dereferenceable(16) %p0, ptr noundef nonnull align 4 dereferenceable(16) %p1, i64 16, i1 false), !alias.scope !0, !noalias !3
+    %v0 = load i32, ptr %q, align 4, !alias.scope !3, !noalias !0
+    %q1 = getelementptr inbounds i32, ptr %q, i64 1
+    %v1 = load i32, ptr %q1, align 4, !alias.scope !3, !noalias !0
     %add = add i32 %v0, %v1
     ret i32 %add
   }
 
-  define i32 @test_mempcpy(i32* nocapture %p, i32* nocapture readonly %q) {
-    %p0 = bitcast i32* %p to i8*
-    %add.ptr = getelementptr inbounds i32, i32* %p, i64 4
-    %p1 = bitcast i32* %add.ptr to i8*
-    %call = tail call i8* @mempcpy(i8* noundef nonnull align 4 dereferenceable(16) %p0, i8* noundef nonnull align 4 dereferenceable(16) %p1, i64 16), !alias.scope !0, !noalias !3
-    %v0 = load i32, i32* %q, align 4, !alias.scope !3, !noalias !0
-    %q1 = getelementptr inbounds i32, i32* %q, i64 1
-    %v1 = load i32, i32* %q1, align 4, !alias.scope !3, !noalias !0
+  define i32 @test_mempcpy(ptr nocapture %p, ptr nocapture readonly %q) {
+    %p0 = bitcast ptr %p to ptr
+    %add.ptr = getelementptr inbounds i32, ptr %p, i64 4
+    %p1 = bitcast ptr %add.ptr to ptr
+    %call = tail call ptr @mempcpy(ptr noundef nonnull align 4 dereferenceable(16) %p0, ptr noundef nonnull align 4 dereferenceable(16) %p1, i64 16), !alias.scope !0, !noalias !3
+    %v0 = load i32, ptr %q, align 4, !alias.scope !3, !noalias !0
+    %q1 = getelementptr inbounds i32, ptr %q, i64 1
+    %v1 = load i32, ptr %q1, align 4, !alias.scope !3, !noalias !0
     %add = add i32 %v0, %v1
     ret i32 %add
   }
 
   ; Function Attrs: argmemonly nofree nounwind willreturn
-  declare void @llvm.memcpy.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i64, i1 immarg) #0
+  declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg) #0
 
   ; Function Attrs: argmemonly nofree nounwind willreturn
-  declare void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i64 immarg, i1 immarg) #0
+  declare void @llvm.memcpy.inline.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64 immarg, i1 immarg) #0
 
-  declare i8* @mempcpy(i8*, i8*, i64)
+  declare ptr @mempcpy(ptr, ptr, i64)
 
   attributes #0 = { argmemonly nofree nounwind willreturn }
 

diff  --git a/llvm/test/CodeGen/MIR/X86/machine-verifier-address.mir b/llvm/test/CodeGen/MIR/X86/machine-verifier-address.mir
index 228510a0ca818..64c07ae16f037 100644
--- a/llvm/test/CodeGen/MIR/X86/machine-verifier-address.mir
+++ b/llvm/test/CodeGen/MIR/X86/machine-verifier-address.mir
@@ -24,7 +24,7 @@ body:             |
     %6:vr256, %7:vr256 = VGATHERQPDYrm %3, %0, 16, killed %8, 0, $noreg, %5 :: (load unknown-size, align 8)
     %9:vr128 = COPY %6.sub_xmm
     ; CHECK: *** Bad machine code: Displacement in address must fit into 32-bit signed integer ***
-    VMOVLPDmr $noreg, 1, $noreg, 1111111111111, $noreg, killed %9 :: (store (s64) into `i64* undef`)
+    VMOVLPDmr $noreg, 1, $noreg, 1111111111111, $noreg, killed %9 :: (store (s64) into `ptr undef`)
     JMP_1 %bb.1
     ; CHECK: LLVM ERROR: Found 2 machine code errors
 

diff  --git a/llvm/test/CodeGen/MIR/X86/metadata-operands.mir b/llvm/test/CodeGen/MIR/X86/metadata-operands.mir
index fe1f21efacd7c..e6f8e05dcb1f7 100644
--- a/llvm/test/CodeGen/MIR/X86/metadata-operands.mir
+++ b/llvm/test/CodeGen/MIR/X86/metadata-operands.mir
@@ -7,9 +7,9 @@
   define i32 @test(i32 %x) #0 !dbg !4 {
   entry:
     %x.addr = alloca i32, align 4
-    store i32 %x, i32* %x.addr, align 4
-    call void @llvm.dbg.declare(metadata i32* %x.addr, metadata !12, metadata !DIExpression()), !dbg !13
-    %0 = load i32, i32* %x.addr, align 4, !dbg !14
+    store i32 %x, ptr %x.addr, align 4
+    call void @llvm.dbg.declare(metadata ptr %x.addr, metadata !12, metadata !DIExpression()), !dbg !13
+    %0 = load i32, ptr %x.addr, align 4, !dbg !14
     ret i32 %0, !dbg !14
   }
 

diff  --git a/llvm/test/CodeGen/MIR/X86/missing-closing-quote.mir b/llvm/test/CodeGen/MIR/X86/missing-closing-quote.mir
index ffbbf4f6591e4..dc4c3638f48bf 100644
--- a/llvm/test/CodeGen/MIR/X86/missing-closing-quote.mir
+++ b/llvm/test/CodeGen/MIR/X86/missing-closing-quote.mir
@@ -6,7 +6,7 @@
 
   define i32 @test() {
   entry:
-    %a = load i32, i32* @"quoted name"
+    %a = load i32, ptr @"quoted name"
     ret i32 %a
   }
 

diff  --git a/llvm/test/CodeGen/MIR/X86/missing-implicit-operand.mir b/llvm/test/CodeGen/MIR/X86/missing-implicit-operand.mir
index 4bc09b6a4a3f9..dcb39e86d680f 100644
--- a/llvm/test/CodeGen/MIR/X86/missing-implicit-operand.mir
+++ b/llvm/test/CodeGen/MIR/X86/missing-implicit-operand.mir
@@ -4,9 +4,9 @@
 
 --- |
 
-  define i32 @foo(i32* %p) {
+  define i32 @foo(ptr %p) {
   entry:
-    %a = load i32, i32* %p
+    %a = load i32, ptr %p
     %0 = icmp sle i32 %a, 10
     br i1 %0, label %less, label %exit
 

diff  --git a/llvm/test/CodeGen/MIR/X86/null-register-operands.mir b/llvm/test/CodeGen/MIR/X86/null-register-operands.mir
index f64ba1b78721a..5d22399cd72d0 100644
--- a/llvm/test/CodeGen/MIR/X86/null-register-operands.mir
+++ b/llvm/test/CodeGen/MIR/X86/null-register-operands.mir
@@ -3,9 +3,9 @@
 
 --- |
 
-  define i32 @deref(i32* %p) {
+  define i32 @deref(ptr %p) {
   entry:
-    %a = load i32, i32* %p
+    %a = load i32, ptr %p
     ret i32 %a
   }
 

diff  --git a/llvm/test/CodeGen/MIR/X86/pr38773.mir b/llvm/test/CodeGen/MIR/X86/pr38773.mir
index 9af152bd91a66..1d4be4b23f400 100644
--- a/llvm/test/CodeGen/MIR/X86/pr38773.mir
+++ b/llvm/test/CodeGen/MIR/X86/pr38773.mir
@@ -41,9 +41,9 @@
   define dso_local i32 @main() local_unnamed_addr !dbg !7 {
   entry:
     %foo = alloca i32, align 4
-    store volatile i32 4, i32* %foo, align 4
-    %foo.0.foo.0. = load volatile i32, i32* %foo, align 4
-    %foo.0.foo.0.6 = load volatile i32, i32* %foo, align 4
+    store volatile i32 4, ptr %foo, align 4
+    %foo.0.foo.0. = load volatile i32, ptr %foo, align 4
+    %foo.0.foo.0.6 = load volatile i32, ptr %foo, align 4
     %cmp = icmp eq i32 %foo.0.foo.0., 4
     br i1 %cmp, label %sw.bb1, label %sw.bb
   

diff  --git a/llvm/test/CodeGen/MIR/X86/register-operands-target-flag-error.mir b/llvm/test/CodeGen/MIR/X86/register-operands-target-flag-error.mir
index b6bcb778bb96b..dff926e1ebc00 100644
--- a/llvm/test/CodeGen/MIR/X86/register-operands-target-flag-error.mir
+++ b/llvm/test/CodeGen/MIR/X86/register-operands-target-flag-error.mir
@@ -6,7 +6,7 @@
 
   define i32 @inc() {
   entry:
-    %a = load i32, i32* @G
+    %a = load i32, ptr @G
     %b = add i32 %a, 1
     ret i32 %b
   }

diff  --git a/llvm/test/CodeGen/MIR/X86/spill-slot-fixed-stack-object-aliased.mir b/llvm/test/CodeGen/MIR/X86/spill-slot-fixed-stack-object-aliased.mir
index 9063d296f93a2..f3a60a91f6d00 100644
--- a/llvm/test/CodeGen/MIR/X86/spill-slot-fixed-stack-object-aliased.mir
+++ b/llvm/test/CodeGen/MIR/X86/spill-slot-fixed-stack-object-aliased.mir
@@ -5,8 +5,8 @@
   define i32 @test(i32 %a) #0 {
   entry:
     %b = alloca i32
-    store i32 %a, i32* %b
-    %c = load i32, i32* %b
+    store i32 %a, ptr %b
+    %c = load i32, ptr %b
     ret i32 %c
   }
 

diff  --git a/llvm/test/CodeGen/MIR/X86/spill-slot-fixed-stack-object-immutable.mir b/llvm/test/CodeGen/MIR/X86/spill-slot-fixed-stack-object-immutable.mir
index 430f0143c78b1..a705beaca1e8d 100644
--- a/llvm/test/CodeGen/MIR/X86/spill-slot-fixed-stack-object-immutable.mir
+++ b/llvm/test/CodeGen/MIR/X86/spill-slot-fixed-stack-object-immutable.mir
@@ -5,8 +5,8 @@
   define i32 @test(i32 %a) #0 {
   entry:
     %b = alloca i32
-    store i32 %a, i32* %b
-    %c = load i32, i32* %b
+    store i32 %a, ptr %b
+    %c = load i32, ptr %b
     ret i32 %c
   }
 

diff  --git a/llvm/test/CodeGen/MIR/X86/spill-slot-fixed-stack-objects.mir b/llvm/test/CodeGen/MIR/X86/spill-slot-fixed-stack-objects.mir
index 1f7c200848a89..fd634b02c208b 100644
--- a/llvm/test/CodeGen/MIR/X86/spill-slot-fixed-stack-objects.mir
+++ b/llvm/test/CodeGen/MIR/X86/spill-slot-fixed-stack-objects.mir
@@ -6,8 +6,8 @@
   define i32 @test(i32 %a) #0 {
   entry:
     %b = alloca i32
-    store i32 %a, i32* %b
-    %c = load i32, i32* %b
+    store i32 %a, ptr %b
+    %c = load i32, ptr %b
     ret i32 %c
   }
 

diff  --git a/llvm/test/CodeGen/MIR/X86/stack-object-debug-info.mir b/llvm/test/CodeGen/MIR/X86/stack-object-debug-info.mir
index bfaf8a420a161..9c5f549a9ea9d 100644
--- a/llvm/test/CodeGen/MIR/X86/stack-object-debug-info.mir
+++ b/llvm/test/CodeGen/MIR/X86/stack-object-debug-info.mir
@@ -8,20 +8,20 @@
   entry:
     %x.i = alloca i8, align 1
     %y.i = alloca [256 x i8], align 16
-    %0 = bitcast [256 x i8]* %y.i to i8*
+    %0 = bitcast ptr %y.i to ptr
     br label %for.body
 
   for.body:
-    %1 = bitcast [256 x i8]* %y.i to i8*
-    call void @llvm.lifetime.end(i64 -1, i8* %1) #3
-    call void @llvm.lifetime.start(i64 -1, i8* %0) #3
-    call void @llvm.dbg.declare(metadata i8* %0, metadata !4, metadata !DIExpression()) #3, !dbg !7
+    %1 = bitcast ptr %y.i to ptr
+    call void @llvm.lifetime.end(i64 -1, ptr %1) #3
+    call void @llvm.lifetime.start(i64 -1, ptr %0) #3
+    call void @llvm.dbg.declare(metadata ptr %0, metadata !4, metadata !DIExpression()) #3, !dbg !7
     br label %for.body
   }
 
-  declare void @llvm.lifetime.start(i64, i8* nocapture) #2
+  declare void @llvm.lifetime.start(i64, ptr nocapture) #2
 
-  declare void @llvm.lifetime.end(i64, i8* nocapture) #2
+  declare void @llvm.lifetime.end(i64, ptr nocapture) #2
 
   attributes #0 = { nounwind readnone }
   attributes #1 = { nounwind ssp uwtable }

diff  --git a/llvm/test/CodeGen/MIR/X86/stack-object-invalid-name.mir b/llvm/test/CodeGen/MIR/X86/stack-object-invalid-name.mir
index 54d01563db451..78281839866a2 100644
--- a/llvm/test/CodeGen/MIR/X86/stack-object-invalid-name.mir
+++ b/llvm/test/CodeGen/MIR/X86/stack-object-invalid-name.mir
@@ -7,8 +7,8 @@
   define i32 @test(i32 %a) {
   entry:
     %b = alloca i32
-    store i32 %a, i32* %b
-    %c = load i32, i32* %b
+    store i32 %a, ptr %b
+    %c = load i32, ptr %b
     ret i32 %c
   }
 

diff  --git a/llvm/test/CodeGen/MIR/X86/stack-object-operand-name-mismatch-error.mir b/llvm/test/CodeGen/MIR/X86/stack-object-operand-name-mismatch-error.mir
index 66d84017e4975..15b33e1818382 100644
--- a/llvm/test/CodeGen/MIR/X86/stack-object-operand-name-mismatch-error.mir
+++ b/llvm/test/CodeGen/MIR/X86/stack-object-operand-name-mismatch-error.mir
@@ -7,8 +7,8 @@
   define i32 @test(i32 %a) {
   entry:
     %b = alloca i32
-    store i32 %a, i32* %b
-    %c = load i32, i32* %b
+    store i32 %a, ptr %b
+    %c = load i32, ptr %b
     ret i32 %c
   }
 

diff  --git a/llvm/test/CodeGen/MIR/X86/stack-object-operands.mir b/llvm/test/CodeGen/MIR/X86/stack-object-operands.mir
index d6dac85f21cb4..672f8297160af 100644
--- a/llvm/test/CodeGen/MIR/X86/stack-object-operands.mir
+++ b/llvm/test/CodeGen/MIR/X86/stack-object-operands.mir
@@ -9,9 +9,9 @@
   entry:
     %b = alloca i32
     %0 = alloca i32
-    store i32 %a, i32* %b
-    store i32 2, i32* %0
-    %c = load i32, i32* %b
+    store i32 %a, ptr %b
+    store i32 2, ptr %0
+    %c = load i32, ptr %b
     ret i32 %c
   }
 

diff  --git a/llvm/test/CodeGen/MIR/X86/stack-object-redefinition-error.mir b/llvm/test/CodeGen/MIR/X86/stack-object-redefinition-error.mir
index ad6b9da529843..20b93077609ce 100644
--- a/llvm/test/CodeGen/MIR/X86/stack-object-redefinition-error.mir
+++ b/llvm/test/CodeGen/MIR/X86/stack-object-redefinition-error.mir
@@ -6,9 +6,9 @@
   entry:
     %b = alloca i32
     %x = alloca i64
-    store i32 %a, i32* %b
-    store i64 2, i64* %x
-    %c = load i32, i32* %b
+    store i32 %a, ptr %b
+    store i64 2, ptr %x
+    %c = load i32, ptr %b
     ret i32 %c
   }
 

diff  --git a/llvm/test/CodeGen/MIR/X86/stack-objects.mir b/llvm/test/CodeGen/MIR/X86/stack-objects.mir
index 282c6102ffb7d..4a97455e20387 100644
--- a/llvm/test/CodeGen/MIR/X86/stack-objects.mir
+++ b/llvm/test/CodeGen/MIR/X86/stack-objects.mir
@@ -7,9 +7,9 @@
   entry:
     %b = alloca i32
     %x = alloca i64
-    store i32 %a, i32* %b
-    store i64 2, i64* %x
-    %c = load i32, i32* %b
+    store i32 %a, ptr %b
+    store i64 2, ptr %x
+    %c = load i32, ptr %b
     ret i32 %c
   }
 

diff  --git a/llvm/test/CodeGen/MIR/X86/undefined-fixed-stack-object.mir b/llvm/test/CodeGen/MIR/X86/undefined-fixed-stack-object.mir
index d7344cb02f6e5..f319108e5628e 100644
--- a/llvm/test/CodeGen/MIR/X86/undefined-fixed-stack-object.mir
+++ b/llvm/test/CodeGen/MIR/X86/undefined-fixed-stack-object.mir
@@ -5,9 +5,9 @@
   entry:
     %b = alloca i32
     %0 = alloca i32
-    store i32 %a, i32* %b
-    store i32 2, i32* %0
-    %c = load i32, i32* %b
+    store i32 %a, ptr %b
+    store i32 2, ptr %0
+    %c = load i32, ptr %b
     ret i32 %c
   }
 

diff  --git a/llvm/test/CodeGen/MIR/X86/undefined-global-value.mir b/llvm/test/CodeGen/MIR/X86/undefined-global-value.mir
index ec27a408933a2..650695c0a9cbf 100644
--- a/llvm/test/CodeGen/MIR/X86/undefined-global-value.mir
+++ b/llvm/test/CodeGen/MIR/X86/undefined-global-value.mir
@@ -8,7 +8,7 @@
 
   define i32 @inc() {
   entry:
-    %a = load i32, i32* @0
+    %a = load i32, ptr @0
     %b = add i32 %a, 1
     ret i32 %b
   }

diff  --git a/llvm/test/CodeGen/MIR/X86/undefined-ir-block-in-blockaddress.mir b/llvm/test/CodeGen/MIR/X86/undefined-ir-block-in-blockaddress.mir
index e28d8049b9c8a..1858979870464 100644
--- a/llvm/test/CodeGen/MIR/X86/undefined-ir-block-in-blockaddress.mir
+++ b/llvm/test/CodeGen/MIR/X86/undefined-ir-block-in-blockaddress.mir
@@ -2,13 +2,13 @@
 
 --- |
 
-  @addr = global i8* null
+  @addr = global ptr null
 
   define void @test() {
   entry:
-    store volatile i8* blockaddress(@test, %block), i8** @addr
-    %val = load volatile i8*, i8** @addr
-    indirectbr i8* %val, [label %block]
+    store volatile ptr blockaddress(@test, %block), ptr @addr
+    %val = load volatile ptr, ptr @addr
+    indirectbr ptr %val, [label %block]
 
   block:
     ret void

diff  --git a/llvm/test/CodeGen/MIR/X86/undefined-ir-block-slot-in-blockaddress.mir b/llvm/test/CodeGen/MIR/X86/undefined-ir-block-slot-in-blockaddress.mir
index b82c5d1ee32ae..309da51ca830f 100644
--- a/llvm/test/CodeGen/MIR/X86/undefined-ir-block-slot-in-blockaddress.mir
+++ b/llvm/test/CodeGen/MIR/X86/undefined-ir-block-slot-in-blockaddress.mir
@@ -2,13 +2,13 @@
 
 --- |
 
-  @addr = global i8* null
+  @addr = global ptr null
 
   define void @test() {
   entry:
-    store volatile i8* blockaddress(@test, %0), i8** @addr
-    %val = load volatile i8*, i8** @addr
-    indirectbr i8* %val, [label %0]
+    store volatile ptr blockaddress(@test, %0), ptr @addr
+    %val = load volatile ptr, ptr @addr
+    indirectbr ptr %val, [label %0]
 
     ret void
   }

diff  --git a/llvm/test/CodeGen/MIR/X86/undefined-named-global-value.mir b/llvm/test/CodeGen/MIR/X86/undefined-named-global-value.mir
index 8d0e129515407..48f17ef3b006e 100644
--- a/llvm/test/CodeGen/MIR/X86/undefined-named-global-value.mir
+++ b/llvm/test/CodeGen/MIR/X86/undefined-named-global-value.mir
@@ -8,7 +8,7 @@
 
   define i32 @inc() {
   entry:
-    %a = load i32, i32* @G
+    %a = load i32, ptr @G
     %b = add i32 %a, 1
     ret i32 %b
   }

diff  --git a/llvm/test/CodeGen/MIR/X86/undefined-stack-object.mir b/llvm/test/CodeGen/MIR/X86/undefined-stack-object.mir
index bf3f5fa2eae5f..ea77d137eb820 100644
--- a/llvm/test/CodeGen/MIR/X86/undefined-stack-object.mir
+++ b/llvm/test/CodeGen/MIR/X86/undefined-stack-object.mir
@@ -4,8 +4,8 @@
   define i32 @test(i32 %a) {
   entry:
     %b = alloca i32
-    store i32 %a, i32* %b
-    %c = load i32, i32* %b
+    store i32 %a, ptr %b
+    %c = load i32, ptr %b
     ret i32 %c
   }
 

diff  --git a/llvm/test/CodeGen/MIR/X86/undefined-value-in-memory-operand.mir b/llvm/test/CodeGen/MIR/X86/undefined-value-in-memory-operand.mir
index 6069e48def4b1..7bfb6ad197d9e 100644
--- a/llvm/test/CodeGen/MIR/X86/undefined-value-in-memory-operand.mir
+++ b/llvm/test/CodeGen/MIR/X86/undefined-value-in-memory-operand.mir
@@ -2,9 +2,9 @@
 
 --- |
 
-  define i32 @test(i32* %a) {
+  define i32 @test(ptr %a) {
   entry:
-    %b = load i32, i32* %a
+    %b = load i32, ptr %a
     ret i32 %b
   }
 

diff  --git a/llvm/test/CodeGen/MIR/X86/unknown-machine-basic-block.mir b/llvm/test/CodeGen/MIR/X86/unknown-machine-basic-block.mir
index 0c5a58e467cee..6e6796c06464d 100644
--- a/llvm/test/CodeGen/MIR/X86/unknown-machine-basic-block.mir
+++ b/llvm/test/CodeGen/MIR/X86/unknown-machine-basic-block.mir
@@ -5,9 +5,9 @@
 
 --- |
 
-  define i32 @foo(i32* %p) {
+  define i32 @foo(ptr %p) {
   entry:
-    %a = load i32, i32* %p
+    %a = load i32, ptr %p
     %b = icmp sle i32 %a, 10
     br i1 %b, label %0, label %1
 

diff  --git a/llvm/test/CodeGen/MIR/X86/unknown-metadata-keyword.mir b/llvm/test/CodeGen/MIR/X86/unknown-metadata-keyword.mir
index 45d39c399660c..e08ad4c93b01c 100644
--- a/llvm/test/CodeGen/MIR/X86/unknown-metadata-keyword.mir
+++ b/llvm/test/CodeGen/MIR/X86/unknown-metadata-keyword.mir
@@ -1,11 +1,11 @@
 # RUN: not llc -march=x86-64 -run-pass none -o /dev/null %s 2>&1 | FileCheck %s
 
 --- |
-  define i32 @inc(i32* %x) {
+  define i32 @inc(ptr %x) {
   entry:
-    %0 = load i32, i32* %x
+    %0 = load i32, ptr %x
     %1 = add i32 %0, 1
-    store i32 %1, i32* %x
+    store i32 %1, ptr %x
     ret i32 %1
   }
 ...

diff  --git a/llvm/test/CodeGen/MIR/X86/unknown-metadata-node.mir b/llvm/test/CodeGen/MIR/X86/unknown-metadata-node.mir
index 260b0a0a15d54..95fc53b441fb9 100644
--- a/llvm/test/CodeGen/MIR/X86/unknown-metadata-node.mir
+++ b/llvm/test/CodeGen/MIR/X86/unknown-metadata-node.mir
@@ -5,9 +5,9 @@
   define i32 @test(i32 %x) #0 !dbg !4 {
   entry:
     %x.addr = alloca i32, align 4
-    store i32 %x, i32* %x.addr, align 4
-    call void @llvm.dbg.declare(metadata i32* %x.addr, metadata !12, metadata !13), !dbg !14
-    %0 = load i32, i32* %x.addr, align 4, !dbg !15
+    store i32 %x, ptr %x.addr, align 4
+    call void @llvm.dbg.declare(metadata ptr %x.addr, metadata !12, metadata !13), !dbg !14
+    %0 = load i32, ptr %x.addr, align 4, !dbg !15
     ret i32 %0, !dbg !15
   }
 

diff  --git a/llvm/test/CodeGen/MIR/X86/unknown-named-machine-basic-block.mir b/llvm/test/CodeGen/MIR/X86/unknown-named-machine-basic-block.mir
index 11d894dbeff59..10819bfafd9ae 100644
--- a/llvm/test/CodeGen/MIR/X86/unknown-named-machine-basic-block.mir
+++ b/llvm/test/CodeGen/MIR/X86/unknown-named-machine-basic-block.mir
@@ -4,9 +4,9 @@
 
 --- |
 
-  define i32 @foo(i32* %p) {
+  define i32 @foo(ptr %p) {
   entry:
-    %a = load i32, i32* %p
+    %a = load i32, ptr %p
     %0 = icmp sle i32 %a, 10
     br i1 %0, label %less, label %exit
 

diff  --git a/llvm/test/CodeGen/MIR/X86/variable-sized-stack-object-size-error.mir b/llvm/test/CodeGen/MIR/X86/variable-sized-stack-object-size-error.mir
index 77562620f1eb0..ebf0b41c09e4d 100644
--- a/llvm/test/CodeGen/MIR/X86/variable-sized-stack-object-size-error.mir
+++ b/llvm/test/CodeGen/MIR/X86/variable-sized-stack-object-size-error.mir
@@ -6,9 +6,9 @@
     %b = alloca i32
     %x = alloca i64
     %y = alloca i32, i32 %a
-    store i32 %a, i32* %b
-    store i64 2, i64* %x
-    %c = load i32, i32* %b
+    store i32 %a, ptr %b
+    store i64 2, ptr %x
+    %c = load i32, ptr %b
     ret i32 %c
   }
 

diff  --git a/llvm/test/CodeGen/MIR/X86/variable-sized-stack-objects.mir b/llvm/test/CodeGen/MIR/X86/variable-sized-stack-objects.mir
index 81c2b99fc0684..a34754c8ed483 100644
--- a/llvm/test/CodeGen/MIR/X86/variable-sized-stack-objects.mir
+++ b/llvm/test/CodeGen/MIR/X86/variable-sized-stack-objects.mir
@@ -9,9 +9,9 @@
     %b = alloca i32
     %x = alloca i64
     %y = alloca i32, i32 %a
-    store i32 %a, i32* %b
-    store i64 2, i64* %x
-    %c = load i32, i32* %b
+    store i32 %a, ptr %b
+    store i64 2, ptr %x
+    %c = load i32, ptr %b
     ret i32 %c
   }
 

diff  --git a/llvm/test/CodeGen/MSP430/2009-05-10-CyclicDAG.ll b/llvm/test/CodeGen/MSP430/2009-05-10-CyclicDAG.ll
index 38e9832f526de..9d257f206f04d 100644
--- a/llvm/test/CodeGen/MSP430/2009-05-10-CyclicDAG.ll
+++ b/llvm/test/CodeGen/MSP430/2009-05-10-CyclicDAG.ll
@@ -7,9 +7,9 @@ target triple = "msp430-unknown-linux-gnu"
 
 define void @uip_arp_arpin() nounwind {
 entry:
-	%tmp = load volatile i16, i16* @uip_len		; <i16> [#uses=1]
+	%tmp = load volatile i16, ptr @uip_len		; <i16> [#uses=1]
 	%cmp = icmp ult i16 %tmp, 42		; <i1> [#uses=1]
-	store volatile i16 0, i16* @uip_len
+	store volatile i16 0, ptr @uip_len
 	br i1 %cmp, label %if.then, label %if.end
 
 if.then:		; preds = %entry

diff  --git a/llvm/test/CodeGen/MSP430/2009-05-17-Rot.ll b/llvm/test/CodeGen/MSP430/2009-05-17-Rot.ll
index 30b373990a75b..a2c9cebe72dba 100644
--- a/llvm/test/CodeGen/MSP430/2009-05-17-Rot.ll
+++ b/llvm/test/CodeGen/MSP430/2009-05-17-Rot.ll
@@ -3,15 +3,15 @@
 define i16 @rol1u16(i16 %x.arg) nounwind {
         %retval = alloca i16
         %x = alloca i16
-        store i16 %x.arg, i16* %x
-        %1 = load i16, i16* %x
+        store i16 %x.arg, ptr %x
+        %1 = load i16, ptr %x
         %2 = shl i16 %1, 1
-        %3 = load i16, i16* %x
+        %3 = load i16, ptr %x
         %4 = lshr i16 %3, 15
         %5 = or i16 %2, %4
-        store i16 %5, i16* %retval
+        store i16 %5, ptr %retval
         br label %return
 return:
-        %6 = load i16, i16* %retval
+        %6 = load i16, ptr %retval
         ret i16 %6
 }

diff  --git a/llvm/test/CodeGen/MSP430/2009-05-17-Shift.ll b/llvm/test/CodeGen/MSP430/2009-05-17-Shift.ll
index 2e3dd5593ff0b..ace4413618f75 100644
--- a/llvm/test/CodeGen/MSP430/2009-05-17-Shift.ll
+++ b/llvm/test/CodeGen/MSP430/2009-05-17-Shift.ll
@@ -3,13 +3,13 @@
 define i16 @lsr2u16(i16 %x.arg) nounwind {
         %retval = alloca i16
         %x = alloca i16
-        store i16 %x.arg, i16* %x
-        %1 = load i16, i16* %x
+        store i16 %x.arg, ptr %x
+        %1 = load i16, ptr %x
         %2 = lshr i16 %1, 2
-        store i16 %2, i16* %retval
+        store i16 %2, ptr %retval
         br label %return
 return:
-        %3 = load i16, i16* %retval
+        %3 = load i16, ptr %retval
         ret i16 %3
 
 }

diff  --git a/llvm/test/CodeGen/MSP430/2009-08-25-DynamicStackAlloc.ll b/llvm/test/CodeGen/MSP430/2009-08-25-DynamicStackAlloc.ll
index ca54ff0c3b48b..d15a5c14871f4 100644
--- a/llvm/test/CodeGen/MSP430/2009-08-25-DynamicStackAlloc.ll
+++ b/llvm/test/CodeGen/MSP430/2009-08-25-DynamicStackAlloc.ll
@@ -6,8 +6,8 @@ target triple = "msp430-generic-generic"
 define i16 @foo() nounwind readnone {
 entry:
   %result = alloca i16, align 1                   ; <i16*> [#uses=2]
-  store volatile i16 0, i16* %result
-  %tmp = load volatile i16, i16* %result               ; <i16> [#uses=1]
+  store volatile i16 0, ptr %result
+  %tmp = load volatile i16, ptr %result               ; <i16> [#uses=1]
   ret i16 %tmp
 }
 
@@ -22,8 +22,8 @@ while.cond:                                       ; preds = %while.cond, %entry
 
 while.end:                                        ; preds = %while.cond
   %result.i = alloca i16, align 1                 ; <i16*> [#uses=2]
-  store volatile i16 0, i16* %result.i
-  %tmp.i = load volatile i16, i16* %result.i           ; <i16> [#uses=0]
+  store volatile i16 0, ptr %result.i
+  %tmp.i = load volatile i16, ptr %result.i           ; <i16> [#uses=0]
   ret i16 0
 }
 

diff  --git a/llvm/test/CodeGen/MSP430/2009-09-18-AbsoluteAddr.ll b/llvm/test/CodeGen/MSP430/2009-09-18-AbsoluteAddr.ll
index 72ba335b54e1b..282ee30d73ca3 100644
--- a/llvm/test/CodeGen/MSP430/2009-09-18-AbsoluteAddr.ll
+++ b/llvm/test/CodeGen/MSP430/2009-09-18-AbsoluteAddr.ll
@@ -10,13 +10,13 @@ entry:
   %retval = alloca i8                             ; <i8*> [#uses=2]
   %x.addr = alloca i8                             ; <i8*> [#uses=2]
   %tmp = alloca i8, align 1                       ; <i8*> [#uses=2]
-  store i8 %x, i8* %x.addr
-  %tmp1 = load volatile i8, i8* @"\010x0021"          ; <i8> [#uses=1]
-  store i8 %tmp1, i8* %tmp
-  %tmp2 = load i8, i8* %x.addr                        ; <i8> [#uses=1]
-  store volatile i8 %tmp2, i8* @"\010x0021"
-  %tmp3 = load i8, i8* %tmp                           ; <i8> [#uses=1]
-  store i8 %tmp3, i8* %retval
-  %0 = load i8, i8* %retval                           ; <i8> [#uses=1]
+  store i8 %x, ptr %x.addr
+  %tmp1 = load volatile i8, ptr @"\010x0021"          ; <i8> [#uses=1]
+  store i8 %tmp1, ptr %tmp
+  %tmp2 = load i8, ptr %x.addr                        ; <i8> [#uses=1]
+  store volatile i8 %tmp2, ptr @"\010x0021"
+  %tmp3 = load i8, ptr %tmp                           ; <i8> [#uses=1]
+  store i8 %tmp3, ptr %retval
+  %0 = load i8, ptr %retval                           ; <i8> [#uses=1]
   ret i8 %0
 }

diff  --git a/llvm/test/CodeGen/MSP430/2009-10-10-OrImpDef.ll b/llvm/test/CodeGen/MSP430/2009-10-10-OrImpDef.ll
index 6dfbbfc03e906..45fd35c70eb99 100644
--- a/llvm/test/CodeGen/MSP430/2009-10-10-OrImpDef.ll
+++ b/llvm/test/CodeGen/MSP430/2009-10-10-OrImpDef.ll
@@ -4,9 +4,9 @@ define void @foo() nounwind {
 entry:
 	%r = alloca i8		; <i8*> [#uses=2]
 	%"alloca point" = bitcast i32 0 to i32		; <i32> [#uses=0]
-	load volatile i8, i8* %r, align 1		; <i8>:0 [#uses=1]
+	load volatile i8, ptr %r, align 1		; <i8>:0 [#uses=1]
 	or i8 %0, 1		; <i8>:1 [#uses=1]
-	store volatile i8 %1, i8* %r, align 1
+	store volatile i8 %1, ptr %r, align 1
 	br label %return
 
 return:		; preds = %entry

diff  --git a/llvm/test/CodeGen/MSP430/2009-11-08-InvalidResNo.ll b/llvm/test/CodeGen/MSP430/2009-11-08-InvalidResNo.ll
index 04b087e953638..8d8b9b42f0755 100644
--- a/llvm/test/CodeGen/MSP430/2009-11-08-InvalidResNo.ll
+++ b/llvm/test/CodeGen/MSP430/2009-11-08-InvalidResNo.ll
@@ -2,13 +2,13 @@
 target datalayout = "e-p:16:8:8-i8:8:8-i16:8:8-i32:8:8"
 target triple = "msp430-elf"
 
-%struct.httpd_fs_file = type { i8*, i16 }
-%struct.psock = type { %struct.pt, %struct.pt, i8*, i8*, i8*, i16, i16, %struct.httpd_fs_file, i16, i8, i8 }
+%struct.httpd_fs_file = type { ptr, i16 }
+%struct.psock = type { %struct.pt, %struct.pt, ptr, ptr, ptr, i16, i16, %struct.httpd_fs_file, i16, i8, i8 }
 %struct.pt = type { i16 }
 
-@foo = external global i8*
+@foo = external global ptr
 
-define signext i8 @psock_readto(%struct.psock* nocapture %psock, i8 zeroext %c) nounwind {
+define signext i8 @psock_readto(ptr nocapture %psock, i8 zeroext %c) nounwind {
 entry:
   switch i16 undef, label %sw.epilog [
     i16 0, label %sw.bb
@@ -46,10 +46,10 @@ while.cond36.i:                                   ; preds = %while.body41.i, %wh
   br i1 undef, label %do.body, label %while.body41.i
 
 while.body41.i:                                   ; preds = %while.cond36.i
-  %tmp43.i = load i8*, i8** @foo                      ; <i8*> [#uses=2]
-  %tmp44.i = load i8, i8* %tmp43.i                    ; <i8> [#uses=1]
-  %ptrincdec50.i = getelementptr inbounds i8, i8* %tmp43.i, i16 1 ; <i8*> [#uses=1]
-  store i8* %ptrincdec50.i, i8** @foo
+  %tmp43.i = load ptr, ptr @foo                      ; <i8*> [#uses=2]
+  %tmp44.i = load i8, ptr %tmp43.i                    ; <i8> [#uses=1]
+  %ptrincdec50.i = getelementptr inbounds i8, ptr %tmp43.i, i16 1 ; <i8*> [#uses=1]
+  store ptr %ptrincdec50.i, ptr @foo
   %cmp55.i = icmp eq i8 %tmp44.i, %c              ; <i1> [#uses=1]
   br i1 %cmp55.i, label %do.end41, label %while.cond36.i
 

diff  --git a/llvm/test/CodeGen/MSP430/2009-12-21-FrameAddr.ll b/llvm/test/CodeGen/MSP430/2009-12-21-FrameAddr.ll
index be82e98dffe37..8037b3bbc5593 100644
--- a/llvm/test/CodeGen/MSP430/2009-12-21-FrameAddr.ll
+++ b/llvm/test/CodeGen/MSP430/2009-12-21-FrameAddr.ll
@@ -5,11 +5,11 @@ target triple = "msp430-unknown-linux-gnu"
 
 define msp430_intrcc void @foo() nounwind #0 {
 entry:
-	%fa = call i8* @llvm.frameaddress(i32 0)
-	store i8 0, i8* %fa
+	%fa = call ptr @llvm.frameaddress(i32 0)
+	store i8 0, ptr %fa
 	ret void
 }
 
-declare i8* @llvm.frameaddress(i32)
+declare ptr @llvm.frameaddress(i32)
 
 attributes #0 = { noinline nounwind optnone "interrupt"="2" }

diff  --git a/llvm/test/CodeGen/MSP430/2009-12-22-InlineAsm.ll b/llvm/test/CodeGen/MSP430/2009-12-22-InlineAsm.ll
index fa9d0c8e46cb9..04ccecd6582be 100644
--- a/llvm/test/CodeGen/MSP430/2009-12-22-InlineAsm.ll
+++ b/llvm/test/CodeGen/MSP430/2009-12-22-InlineAsm.ll
@@ -8,10 +8,10 @@ target triple = "msp430-unknown-unknown"
 
 define i16 @main() noreturn nounwind {
 entry:
-  %0 = tail call i8* asm "", "=r,0"(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @buf, i16 0, i16 0)) nounwind ; <i8*> [#uses=1]
-  %sub.ptr = getelementptr inbounds i8, i8* %0, i16 1 ; <i8*> [#uses=1]
-  %sub.ptr.lhs.cast = ptrtoint i8* %sub.ptr to i16 ; <i16> [#uses=1]
-  %sub.ptr.sub = sub i16 %sub.ptr.lhs.cast, ptrtoint ([10 x i8]* @buf to i16) ; <i16> [#uses=1]
+  %0 = tail call ptr asm "", "=r,0"(ptr @buf) nounwind ; <i8*> [#uses=1]
+  %sub.ptr = getelementptr inbounds i8, ptr %0, i16 1 ; <i8*> [#uses=1]
+  %sub.ptr.lhs.cast = ptrtoint ptr %sub.ptr to i16 ; <i16> [#uses=1]
+  %sub.ptr.sub = sub i16 %sub.ptr.lhs.cast, ptrtoint (ptr @buf to i16) ; <i16> [#uses=1]
   %cmp = icmp eq i16 %sub.ptr.sub, 1              ; <i1> [#uses=1]
   br i1 %cmp, label %bar.exit, label %if.then.i
 

diff  --git a/llvm/test/CodeGen/MSP430/2010-05-01-CombinerAnd.ll b/llvm/test/CodeGen/MSP430/2010-05-01-CombinerAnd.ll
index 907d6abe99212..0864a0f7195c9 100644
--- a/llvm/test/CodeGen/MSP430/2010-05-01-CombinerAnd.ll
+++ b/llvm/test/CodeGen/MSP430/2010-05-01-CombinerAnd.ll
@@ -19,7 +19,7 @@ land.end:                                         ; preds = %land.rhs, %while.co
   br i1 %0, label %while.body, label %while.end
 
 while.body:                                       ; preds = %land.end
-  %tmp4 = load i16, i16* undef                         ; <i16> [#uses=0]
+  %tmp4 = load i16, ptr undef                         ; <i16> [#uses=0]
   br label %while.cond
 
 while.end:                                        ; preds = %land.end

diff  --git a/llvm/test/CodeGen/MSP430/AddrMode-bis-rx.ll b/llvm/test/CodeGen/MSP430/AddrMode-bis-rx.ll
index 948b67eb66c8d..d2a04348e3f9c 100644
--- a/llvm/test/CodeGen/MSP430/AddrMode-bis-rx.ll
+++ b/llvm/test/CodeGen/MSP430/AddrMode-bis-rx.ll
@@ -2,8 +2,8 @@
 target datalayout = "e-p:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:16:16"
 target triple = "msp430-generic-generic"
 
-define i16 @am1(i16 %x, i16* %a) nounwind {
-	%1 = load i16, i16* %a
+define i16 @am1(i16 %x, ptr %a) nounwind {
+	%1 = load i16, ptr %a
 	%2 = or i16 %1,%x
 	ret i16 %2
 }
@@ -13,7 +13,7 @@ define i16 @am1(i16 %x, i16* %a) nounwind {
 @foo = external global i16
 
 define i16 @am2(i16 %x) nounwind {
-	%1 = load i16, i16* @foo
+	%1 = load i16, ptr @foo
 	%2 = or i16 %1,%x
 	ret i16 %2
 }
@@ -23,8 +23,8 @@ define i16 @am2(i16 %x) nounwind {
 @bar = internal constant [2 x i8] [ i8 32, i8 64 ]
 
 define i8 @am3(i8 %x, i16 %n) nounwind {
-	%1 = getelementptr [2 x i8], [2 x i8]* @bar, i16 0, i16 %n
-	%2 = load i8, i8* %1
+	%1 = getelementptr [2 x i8], ptr @bar, i16 0, i16 %n
+	%2 = load i8, ptr %1
 	%3 = or i8 %2,%x
 	ret i8 %3
 }
@@ -32,16 +32,16 @@ define i8 @am3(i8 %x, i16 %n) nounwind {
 ; CHECK:		bis.b	bar(r13), r12
 
 define i16 @am4(i16 %x) nounwind {
-	%1 = load volatile i16, i16* inttoptr(i16 32 to i16*)
+	%1 = load volatile i16, ptr inttoptr(i16 32 to ptr)
 	%2 = or i16 %1,%x
 	ret i16 %2
 }
 ; CHECK-LABEL: am4:
 ; CHECK:		bis	&32, r12
 
-define i16 @am5(i16 %x, i16* %a) nounwind {
-	%1 = getelementptr i16, i16* %a, i16 2
-	%2 = load i16, i16* %1
+define i16 @am5(i16 %x, ptr %a) nounwind {
+	%1 = getelementptr i16, ptr %a, i16 2
+	%2 = load i16, ptr %1
 	%3 = or i16 %2,%x
 	ret i16 %3
 }
@@ -52,7 +52,7 @@ define i16 @am5(i16 %x, i16* %a) nounwind {
 @baz = common global %S zeroinitializer, align 1
 
 define i16 @am6(i16 %x) nounwind {
-	%1 = load i16, i16* getelementptr (%S, %S* @baz, i32 0, i32 1)
+	%1 = load i16, ptr getelementptr (%S, ptr @baz, i32 0, i32 1)
 	%2 = or i16 %1,%x
 	ret i16 %2
 }
@@ -63,9 +63,9 @@ define i16 @am6(i16 %x) nounwind {
 @duh = internal constant %T { i16 16, [2 x i8][i8 32, i8 64 ] }
 
 define i8 @am7(i8 %x, i16 %n) nounwind {
-	%1 = getelementptr %T, %T* @duh, i32 0, i32 1
-	%2 = getelementptr [2 x i8], [2 x i8]* %1, i16 0, i16 %n
-	%3= load i8, i8* %2
+	%1 = getelementptr %T, ptr @duh, i32 0, i32 1
+	%2 = getelementptr [2 x i8], ptr %1, i16 0, i16 %n
+	%3= load i8, ptr %2
 	%4 = or i8 %3,%x
 	ret i8 %4
 }

diff  --git a/llvm/test/CodeGen/MSP430/AddrMode-bis-xr.ll b/llvm/test/CodeGen/MSP430/AddrMode-bis-xr.ll
index 6d3a497386d5d..300949d654bec 100644
--- a/llvm/test/CodeGen/MSP430/AddrMode-bis-xr.ll
+++ b/llvm/test/CodeGen/MSP430/AddrMode-bis-xr.ll
@@ -2,10 +2,10 @@
 target datalayout = "e-p:16:16:16-i8:8:8-i16:16:16-i32:16:16"
 target triple = "msp430-generic-generic"
 
-define void @am1(i16* %a, i16 %x) nounwind {
-	%1 = load i16, i16* %a
+define void @am1(ptr %a, i16 %x) nounwind {
+	%1 = load i16, ptr %a
 	%2 = or i16 %x, %1
-	store i16 %2, i16* %a
+	store i16 %2, ptr %a
 	ret void
 }
 ; CHECK-LABEL: am1:
@@ -14,9 +14,9 @@ define void @am1(i16* %a, i16 %x) nounwind {
 @foo = external global i16
 
 define void @am2(i16 %x) nounwind {
-	%1 = load i16, i16* @foo
+	%1 = load i16, ptr @foo
 	%2 = or i16 %x, %1
-	store i16 %2, i16* @foo
+	store i16 %2, ptr @foo
 	ret void
 }
 ; CHECK-LABEL: am2:
@@ -25,29 +25,29 @@ define void @am2(i16 %x) nounwind {
 @bar = external global [2 x i8]
 
 define void @am3(i16 %i, i8 %x) nounwind {
-	%1 = getelementptr [2 x i8], [2 x i8]* @bar, i16 0, i16 %i
-	%2 = load i8, i8* %1
+	%1 = getelementptr [2 x i8], ptr @bar, i16 0, i16 %i
+	%2 = load i8, ptr %1
 	%3 = or i8 %x, %2
-	store i8 %3, i8* %1
+	store i8 %3, ptr %1
 	ret void
 }
 ; CHECK-LABEL: am3:
 ; CHECK:		bis.b	r13, bar(r12)
 
 define void @am4(i16 %x) nounwind {
-	%1 = load volatile i16, i16* inttoptr(i16 32 to i16*)
+	%1 = load volatile i16, ptr inttoptr(i16 32 to ptr)
 	%2 = or i16 %x, %1
-	store volatile i16 %2, i16* inttoptr(i16 32 to i16*)
+	store volatile i16 %2, ptr inttoptr(i16 32 to ptr)
 	ret void
 }
 ; CHECK-LABEL: am4:
 ; CHECK:		bis	r12, &32
 
-define void @am5(i16* %a, i16 %x) readonly {
-	%1 = getelementptr inbounds i16, i16* %a, i16 2
-	%2 = load i16, i16* %1
+define void @am5(ptr %a, i16 %x) readonly {
+	%1 = getelementptr inbounds i16, ptr %a, i16 2
+	%2 = load i16, ptr %1
 	%3 = or i16 %x, %2
-	store i16 %3, i16* %1
+	store i16 %3, ptr %1
 	ret void
 }
 ; CHECK-LABEL: am5:
@@ -57,9 +57,9 @@ define void @am5(i16* %a, i16 %x) readonly {
 @baz = common global %S zeroinitializer
 
 define void @am6(i16 %x) nounwind {
-	%1 = load i16, i16* getelementptr (%S, %S* @baz, i32 0, i32 1)
+	%1 = load i16, ptr getelementptr (%S, ptr @baz, i32 0, i32 1)
 	%2 = or i16 %x, %1
-	store i16 %2, i16* getelementptr (%S, %S* @baz, i32 0, i32 1)
+	store i16 %2, ptr getelementptr (%S, ptr @baz, i32 0, i32 1)
 	ret void
 }
 ; CHECK-LABEL: am6:
@@ -69,11 +69,11 @@ define void @am6(i16 %x) nounwind {
 @duh = external global %T
 
 define void @am7(i16 %n, i8 %x) nounwind {
-	%1 = getelementptr %T, %T* @duh, i32 0, i32 1
-	%2 = getelementptr [2 x i8], [2 x i8]* %1, i16 0, i16 %n
-	%3 = load i8, i8* %2
+	%1 = getelementptr %T, ptr @duh, i32 0, i32 1
+	%2 = getelementptr [2 x i8], ptr %1, i16 0, i16 %n
+	%3 = load i8, ptr %2
 	%4 = or i8 %x, %3
-	store i8 %4, i8* %2
+	store i8 %4, ptr %2
 	ret void
 }
 ; CHECK-LABEL: am7:

diff  --git a/llvm/test/CodeGen/MSP430/AddrMode-mov-rx.ll b/llvm/test/CodeGen/MSP430/AddrMode-mov-rx.ll
index 0605e8e86ce52..18e2431eebf8b 100644
--- a/llvm/test/CodeGen/MSP430/AddrMode-mov-rx.ll
+++ b/llvm/test/CodeGen/MSP430/AddrMode-mov-rx.ll
@@ -2,8 +2,8 @@
 target datalayout = "e-p:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:16:16"
 target triple = "msp430-generic-generic"
 
-define i16 @am1(i16* %a) nounwind {
-	%1 = load i16, i16* %a
+define i16 @am1(ptr %a) nounwind {
+	%1 = load i16, ptr %a
 	ret i16 %1
 }
 ; CHECK-LABEL: am1:
@@ -12,7 +12,7 @@ define i16 @am1(i16* %a) nounwind {
 @foo = external global i16
 
 define i16 @am2() nounwind {
-	%1 = load i16, i16* @foo
+	%1 = load i16, ptr @foo
 	ret i16 %1
 }
 ; CHECK-LABEL: am2:
@@ -21,23 +21,23 @@ define i16 @am2() nounwind {
 @bar = internal constant [2 x i8] [ i8 32, i8 64 ]
 
 define i8 @am3(i16 %n) nounwind {
-	%1 = getelementptr [2 x i8], [2 x i8]* @bar, i16 0, i16 %n
-	%2 = load i8, i8* %1
+	%1 = getelementptr [2 x i8], ptr @bar, i16 0, i16 %n
+	%2 = load i8, ptr %1
 	ret i8 %2
 }
 ; CHECK-LABEL: am3:
 ; CHECK:		mov.b	bar(r12), r12
 
 define i16 @am4() nounwind {
-	%1 = load volatile i16, i16* inttoptr(i16 32 to i16*)
+	%1 = load volatile i16, ptr inttoptr(i16 32 to ptr)
 	ret i16 %1
 }
 ; CHECK-LABEL: am4:
 ; CHECK:		mov	&32, r12
 
-define i16 @am5(i16* %a) nounwind {
-	%1 = getelementptr i16, i16* %a, i16 2
-	%2 = load i16, i16* %1
+define i16 @am5(ptr %a) nounwind {
+	%1 = getelementptr i16, ptr %a, i16 2
+	%2 = load i16, ptr %1
 	ret i16 %2
 }
 ; CHECK-LABEL: am5:
@@ -47,7 +47,7 @@ define i16 @am5(i16* %a) nounwind {
 @baz = common global %S zeroinitializer, align 1
 
 define i16 @am6() nounwind {
-	%1 = load i16, i16* getelementptr (%S, %S* @baz, i32 0, i32 1)
+	%1 = load i16, ptr getelementptr (%S, ptr @baz, i32 0, i32 1)
 	ret i16 %1
 }
 ; CHECK-LABEL: am6:
@@ -57,9 +57,9 @@ define i16 @am6() nounwind {
 @duh = internal constant %T { i16 16, [2 x i8][i8 32, i8 64 ] }
 
 define i8 @am7(i16 %n) nounwind {
-	%1 = getelementptr %T, %T* @duh, i32 0, i32 1
-	%2 = getelementptr [2 x i8], [2 x i8]* %1, i16 0, i16 %n
-	%3= load i8, i8* %2
+	%1 = getelementptr %T, ptr @duh, i32 0, i32 1
+	%2 = getelementptr [2 x i8], ptr %1, i16 0, i16 %n
+	%3= load i8, ptr %2
 	ret i8 %3
 }
 ; CHECK-LABEL: am7:

diff  --git a/llvm/test/CodeGen/MSP430/AddrMode-mov-xr.ll b/llvm/test/CodeGen/MSP430/AddrMode-mov-xr.ll
index acc0b82571166..09219d019fd2c 100644
--- a/llvm/test/CodeGen/MSP430/AddrMode-mov-xr.ll
+++ b/llvm/test/CodeGen/MSP430/AddrMode-mov-xr.ll
@@ -2,8 +2,8 @@
 target datalayout = "e-p:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:16:16"
 target triple = "msp430-generic-generic"
 
-define void @am1(i16* %a, i16 %b) nounwind {
-	store i16 %b, i16* %a
+define void @am1(ptr %a, i16 %b) nounwind {
+	store i16 %b, ptr %a
 	ret void
 }
 ; CHECK-LABEL: am1:
@@ -12,7 +12,7 @@ define void @am1(i16* %a, i16 %b) nounwind {
 @foo = external global i16
 
 define void @am2(i16 %a) nounwind {
-	store i16 %a, i16* @foo
+	store i16 %a, ptr @foo
 	ret void
 }
 ; CHECK-LABEL: am2:
@@ -21,23 +21,23 @@ define void @am2(i16 %a) nounwind {
 @bar = external global [2 x i8]
 
 define void @am3(i16 %i, i8 %a) nounwind {
-	%1 = getelementptr [2 x i8], [2 x i8]* @bar, i16 0, i16 %i
-	store i8 %a, i8* %1
+	%1 = getelementptr [2 x i8], ptr @bar, i16 0, i16 %i
+	store i8 %a, ptr %1
 	ret void
 }
 ; CHECK-LABEL: am3:
 ; CHECK:		mov.b	r13, bar(r12)
 
 define void @am4(i16 %a) nounwind {
-	store volatile i16 %a, i16* inttoptr(i16 32 to i16*)
+	store volatile i16 %a, ptr inttoptr(i16 32 to ptr)
 	ret void
 }
 ; CHECK-LABEL: am4:
 ; CHECK:		mov	r12, &32
 
-define void @am5(i16* nocapture %p, i16 %a) nounwind readonly {
-	%1 = getelementptr inbounds i16, i16* %p, i16 2
-	store i16 %a, i16* %1
+define void @am5(ptr nocapture %p, i16 %a) nounwind readonly {
+	%1 = getelementptr inbounds i16, ptr %p, i16 2
+	store i16 %a, ptr %1
 	ret void
 }
 ; CHECK-LABEL: am5:
@@ -47,7 +47,7 @@ define void @am5(i16* nocapture %p, i16 %a) nounwind readonly {
 @baz = common global %S zeroinitializer, align 1
 
 define void @am6(i16 %a) nounwind {
-	store i16 %a, i16* getelementptr (%S, %S* @baz, i32 0, i32 1)
+	store i16 %a, ptr getelementptr (%S, ptr @baz, i32 0, i32 1)
 	ret void
 }
 ; CHECK-LABEL: am6:
@@ -57,9 +57,9 @@ define void @am6(i16 %a) nounwind {
 @duh = external global %T
 
 define void @am7(i16 %n, i8 %a) nounwind {
-	%1 = getelementptr %T, %T* @duh, i32 0, i32 1
-	%2 = getelementptr [2 x i8], [2 x i8]* %1, i16 0, i16 %n
-	store i8 %a, i8* %2
+	%1 = getelementptr %T, ptr @duh, i32 0, i32 1
+	%2 = getelementptr [2 x i8], ptr %1, i16 0, i16 %n
+	store i8 %a, ptr %2
 	ret void
 }
 ; CHECK-LABEL: am7:

diff  --git a/llvm/test/CodeGen/MSP430/BranchSelector.ll b/llvm/test/CodeGen/MSP430/BranchSelector.ll
index a36da626234a2..3acd7bd3cca3d 100644
--- a/llvm/test/CodeGen/MSP430/BranchSelector.ll
+++ b/llvm/test/CodeGen/MSP430/BranchSelector.ll
@@ -9,182 +9,182 @@ entry:
   br label %while.cond
 
 while.cond:
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  %v0 = load volatile i16, i16* @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  %v0 = load volatile i16, ptr @reg, align 2
   %lnot = icmp eq i16 %v0, 0
 
 ; This BB should be split and all branches should be expanded.
@@ -198,7 +198,7 @@ while.cond:
   br i1 %lnot, label %while.cond, label %while.end
 
 while.end:
-  %i.0.i.0.1822 = load volatile i16, i16* @reg, align 1
+  %i.0.i.0.1822 = load volatile i16, ptr @reg, align 1
   %cmp23 = icmp ult i16 %i.0.i.0.1822, %count
   br i1 %cmp23, label %for.body, label %for.end
 
@@ -206,186 +206,186 @@ for.body:
   br label %while.cond6
 
 while.cond6:
-  %0 = load volatile i16, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 19, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
+  %0 = load volatile i16, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 19, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
   br label %for.inc
 
 for.inc:
-  %1 = load volatile i16, i16* @reg, align 2
+  %1 = load volatile i16, ptr @reg, align 2
   %cmp = icmp ult i16 %1, %count
 
 ; This branch should be expanded.
@@ -404,176 +404,176 @@ define void @WriteSinglePATable() #0 {
 entry:
   br label %begin
 begin:
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  store volatile i16 13, i16* @reg, align 2
-  store volatile i16 17, i16* @reg, align 2
-  store volatile i16 11, i16* @reg, align 2
-  %v2 = load volatile i16, i16* @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  store volatile i16 13, ptr @reg, align 2
+  store volatile i16 17, ptr @reg, align 2
+  store volatile i16 11, ptr @reg, align 2
+  %v2 = load volatile i16, ptr @reg, align 2
   %lnot = icmp eq i16 %v2, 0
 
 ; This branch should not be expanded

diff  --git a/llvm/test/CodeGen/MSP430/Inst16mi.ll b/llvm/test/CodeGen/MSP430/Inst16mi.ll
index bb99e28a1ba0c..28e2e2c570ed2 100644
--- a/llvm/test/CodeGen/MSP430/Inst16mi.ll
+++ b/llvm/test/CodeGen/MSP430/Inst16mi.ll
@@ -7,42 +7,42 @@ target triple = "msp430-generic-generic"
 define void @mov() nounwind {
 ; CHECK-LABEL: mov:
 ; CHECK: mov	#2, &foo
-	store i16 2, i16 * @foo
+	store i16 2, ptr @foo
 	ret void
 }
 
 define void @add() nounwind {
 ; CHECK-LABEL: add:
 ; CHECK: incd	&foo
-	%1 = load i16, i16* @foo
+	%1 = load i16, ptr @foo
 	%2 = add i16 %1, 2
-	store i16 %2, i16 * @foo
+	store i16 %2, ptr @foo
 	ret void
 }
 
 define void @and() nounwind {
 ; CHECK-LABEL: and:
 ; CHECK: and	#2, &foo
-	%1 = load i16, i16* @foo
+	%1 = load i16, ptr @foo
 	%2 = and i16 %1, 2
-	store i16 %2, i16 * @foo
+	store i16 %2, ptr @foo
 	ret void
 }
 
 define void @bis() nounwind {
 ; CHECK-LABEL: bis:
 ; CHECK: bis	#2, &foo
-	%1 = load i16, i16* @foo
+	%1 = load i16, ptr @foo
 	%2 = or i16 %1, 2
-	store i16 %2, i16 * @foo
+	store i16 %2, ptr @foo
 	ret void
 }
 
 define void @xor() nounwind {
 ; CHECK-LABEL: xor:
 ; CHECK: xor	#2, &foo
-	%1 = load i16, i16* @foo
+	%1 = load i16, ptr @foo
 	%2 = xor i16 %1, 2
-	store i16 %2, i16 * @foo
+	store i16 %2, ptr @foo
 	ret void
 }

diff  --git a/llvm/test/CodeGen/MSP430/Inst16mm.ll b/llvm/test/CodeGen/MSP430/Inst16mm.ll
index af00a18acf550..1851045b32da0 100644
--- a/llvm/test/CodeGen/MSP430/Inst16mm.ll
+++ b/llvm/test/CodeGen/MSP430/Inst16mm.ll
@@ -7,48 +7,48 @@ target triple = "msp430-generic-generic"
 define void @mov() nounwind {
 ; CHECK-LABEL: mov:
 ; CHECK: mov	&bar, &foo
-        %1 = load i16, i16* @bar
-        store i16 %1, i16* @foo
+        %1 = load i16, ptr @bar
+        store i16 %1, ptr @foo
         ret void
 }
 
 define void @add() nounwind {
 ; CHECK-LABEL: add:
 ; CHECK: add	&bar, &foo
-	%1 = load i16, i16* @bar
-	%2 = load i16, i16* @foo
+	%1 = load i16, ptr @bar
+	%2 = load i16, ptr @foo
 	%3 = add i16 %2, %1
-	store i16 %3, i16* @foo
+	store i16 %3, ptr @foo
 	ret void
 }
 
 define void @and() nounwind {
 ; CHECK-LABEL: and:
 ; CHECK: and	&bar, &foo
-	%1 = load i16, i16* @bar
-	%2 = load i16, i16* @foo
+	%1 = load i16, ptr @bar
+	%2 = load i16, ptr @foo
 	%3 = and i16 %2, %1
-	store i16 %3, i16* @foo
+	store i16 %3, ptr @foo
 	ret void
 }
 
 define void @bis() nounwind {
 ; CHECK-LABEL: bis:
 ; CHECK: bis	&bar, &foo
-	%1 = load i16, i16* @bar
-	%2 = load i16, i16* @foo
+	%1 = load i16, ptr @bar
+	%2 = load i16, ptr @foo
 	%3 = or i16 %2, %1
-	store i16 %3, i16* @foo
+	store i16 %3, ptr @foo
 	ret void
 }
 
 define void @xor() nounwind {
 ; CHECK-LABEL: xor:
 ; CHECK: xor	&bar, &foo
-	%1 = load i16, i16* @bar
-	%2 = load i16, i16* @foo
+	%1 = load i16, ptr @bar
+	%2 = load i16, ptr @foo
 	%3 = xor i16 %2, %1
-	store i16 %3, i16* @foo
+	store i16 %3, ptr @foo
 	ret void
 }
 
@@ -57,30 +57,30 @@ entry:
  %retval = alloca i16                            ; <i16*> [#uses=3]
  %x = alloca i32, align 2                        ; <i32*> [#uses=1]
  %y = alloca i32, align 2                        ; <i32*> [#uses=1]
- store i16 0, i16* %retval
- %tmp = load i32, i32* %y                             ; <i32> [#uses=1]
- store i32 %tmp, i32* %x
- store i16 0, i16* %retval
- %0 = load i16, i16* %retval                          ; <i16> [#uses=1]
+ store i16 0, ptr %retval
+ %tmp = load i32, ptr %y                             ; <i32> [#uses=1]
+ store i32 %tmp, ptr %x
+ store i16 0, ptr %retval
+ %0 = load i16, ptr %retval                          ; <i16> [#uses=1]
  ret i16 %0
 ; CHECK-LABEL: mov2:
 ; CHECK-DAG:	mov	2(r1), 6(r1)
 ; CHECK-DAG:	mov	0(r1), 4(r1)
 }
 
-define void @cmp(i16* %g, i16* %i) {
+define void @cmp(ptr %g, ptr %i) {
 entry:
 ; CHECK-LABEL: cmp:
 ; CHECK: cmp 8(r12), 4(r13)
-  %add.ptr = getelementptr inbounds i16, i16* %g, i16 4
-  %0 = load i16, i16* %add.ptr, align 2
-  %add.ptr1 = getelementptr inbounds i16, i16* %i, i16 2
-  %1 = load i16, i16* %add.ptr1, align 2
+  %add.ptr = getelementptr inbounds i16, ptr %g, i16 4
+  %0 = load i16, ptr %add.ptr, align 2
+  %add.ptr1 = getelementptr inbounds i16, ptr %i, i16 2
+  %1 = load i16, ptr %add.ptr1, align 2
   %cmp = icmp sgt i16 %0, %1
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:                                          ; preds = %entry
-  store i16 0, i16* %g, align 2
+  store i16 0, ptr %g, align 2
   br label %if.end
 
 if.end:                                           ; preds = %if.then, %entry

diff  --git a/llvm/test/CodeGen/MSP430/Inst16mr.ll b/llvm/test/CodeGen/MSP430/Inst16mr.ll
index e3f23d9c5624e..383b951b22022 100644
--- a/llvm/test/CodeGen/MSP430/Inst16mr.ll
+++ b/llvm/test/CodeGen/MSP430/Inst16mr.ll
@@ -6,34 +6,34 @@ target triple = "msp430-generic-generic"
 define void @mov(i16 %a) nounwind {
 ; CHECK-LABEL: mov:
 ; CHECK: mov	r12, &foo
-	store i16 %a, i16* @foo
+	store i16 %a, ptr @foo
 	ret void
 }
 
 define void @add(i16 %a) nounwind {
 ; CHECK-LABEL: add:
 ; CHECK: add	r12, &foo
-	%1 = load i16, i16* @foo
+	%1 = load i16, ptr @foo
 	%2 = add i16 %a, %1
-	store i16 %2, i16* @foo
+	store i16 %2, ptr @foo
 	ret void
 }
 
 define void @and(i16 %a) nounwind {
 ; CHECK-LABEL: and:
 ; CHECK: and	r12, &foo
-	%1 = load i16, i16* @foo
+	%1 = load i16, ptr @foo
 	%2 = and i16 %a, %1
-	store i16 %2, i16* @foo
+	store i16 %2, ptr @foo
 	ret void
 }
 
 define void @bis(i16 %a) nounwind {
 ; CHECK-LABEL: bis:
 ; CHECK: bis	r12, &foo
-	%1 = load i16, i16* @foo
+	%1 = load i16, ptr @foo
 	%2 = or i16 %a, %1
-	store i16 %2, i16* @foo
+	store i16 %2, ptr @foo
 	ret void
 }
 
@@ -41,18 +41,18 @@ define void @bic(i16 zeroext %m) nounwind {
 ; CHECK-LABEL: bic:
 ; CHECK: bic   r12, &foo
         %1 = xor i16 %m, -1
-        %2 = load i16, i16* @foo
+        %2 = load i16, ptr @foo
         %3 = and i16 %2, %1
-        store i16 %3, i16* @foo
+        store i16 %3, ptr @foo
         ret void
 }
 
 define void @xor(i16 %a) nounwind {
 ; CHECK-LABEL: xor:
 ; CHECK: xor	r12, &foo
-	%1 = load i16, i16* @foo
+	%1 = load i16, ptr @foo
 	%2 = xor i16 %a, %1
-	store i16 %2, i16* @foo
+	store i16 %2, ptr @foo
 	ret void
 }
 

diff  --git a/llvm/test/CodeGen/MSP430/Inst16rm.ll b/llvm/test/CodeGen/MSP430/Inst16rm.ll
index 8a3cd0a46fb36..f4405def2c193 100644
--- a/llvm/test/CodeGen/MSP430/Inst16rm.ll
+++ b/llvm/test/CodeGen/MSP430/Inst16rm.ll
@@ -6,7 +6,7 @@ target triple = "msp430-generic-generic"
 define i16 @add(i16 %a) nounwind {
 ; CHECK-LABEL: add:
 ; CHECK: add	&foo, r12
-	%1 = load i16, i16* @foo
+	%1 = load i16, ptr @foo
 	%2 = add i16 %a, %1
 	ret i16 %2
 }
@@ -14,7 +14,7 @@ define i16 @add(i16 %a) nounwind {
 define i16 @and(i16 %a) nounwind {
 ; CHECK-LABEL: and:
 ; CHECK: and	&foo, r12
-	%1 = load i16, i16* @foo
+	%1 = load i16, ptr @foo
 	%2 = and i16 %a, %1
 	ret i16 %2
 }
@@ -22,7 +22,7 @@ define i16 @and(i16 %a) nounwind {
 define i16 @bis(i16 %a) nounwind {
 ; CHECK-LABEL: bis:
 ; CHECK: bis	&foo, r12
-	%1 = load i16, i16* @foo
+	%1 = load i16, ptr @foo
 	%2 = or i16 %a, %1
 	ret i16 %2
 }
@@ -30,7 +30,7 @@ define i16 @bis(i16 %a) nounwind {
 define i16  @bic(i16 %a) nounwind {
 ; CHECK-LABEL: bic:
 ; CHECK: bic	&foo, r12
-        %1 = load i16, i16* @foo
+        %1 = load i16, ptr @foo
         %2 = xor i16 %1, -1
         %3 = and i16 %a, %2
         ret i16 %3
@@ -39,7 +39,7 @@ define i16  @bic(i16 %a) nounwind {
 define i16 @xor(i16 %a) nounwind {
 ; CHECK-LABEL: xor:
 ; CHECK: xor	&foo, r12
-	%1 = load i16, i16* @foo
+	%1 = load i16, ptr @foo
 	%2 = xor i16 %a, %1
 	ret i16 %2
 }

diff  --git a/llvm/test/CodeGen/MSP430/Inst8mi.ll b/llvm/test/CodeGen/MSP430/Inst8mi.ll
index 36eb3f91f840a..0fb0b9395056e 100644
--- a/llvm/test/CodeGen/MSP430/Inst8mi.ll
+++ b/llvm/test/CodeGen/MSP430/Inst8mi.ll
@@ -6,43 +6,43 @@ target triple = "msp430-generic-generic"
 define void @mov() nounwind {
 ; CHECK-LABEL: mov:
 ; CHECK: mov.b	#2, &foo
-	store i8 2, i8 * @foo
+	store i8 2, ptr @foo
 	ret void
 }
 
 define void @add() nounwind {
 ; CHECK-LABEL: add:
 ; CHECK: incd.b	&foo
-	%1 = load i8, i8* @foo
+	%1 = load i8, ptr @foo
 	%2 = add i8 %1, 2
-	store i8 %2, i8 * @foo
+	store i8 %2, ptr @foo
 	ret void
 }
 
 define void @and() nounwind {
 ; CHECK-LABEL: and:
 ; CHECK: and.b	#2, &foo
-	%1 = load i8, i8* @foo
+	%1 = load i8, ptr @foo
 	%2 = and i8 %1, 2
-	store i8 %2, i8 * @foo
+	store i8 %2, ptr @foo
 	ret void
 }
 
 define void @bis() nounwind {
 ; CHECK-LABEL: bis:
 ; CHECK: bis.b	#2, &foo
-	%1 = load i8, i8* @foo
+	%1 = load i8, ptr @foo
 	%2 = or i8 %1, 2
-	store i8 %2, i8 * @foo
+	store i8 %2, ptr @foo
 	ret void
 }
 
 define void @xor() nounwind {
 ; CHECK-LABEL: xor:
 ; CHECK: xor.b	#2, &foo
-	%1 = load i8, i8* @foo
+	%1 = load i8, ptr @foo
 	%2 = xor i8 %1, 2
-	store i8 %2, i8 * @foo
+	store i8 %2, ptr @foo
 	ret void
 }
 

diff  --git a/llvm/test/CodeGen/MSP430/Inst8mm.ll b/llvm/test/CodeGen/MSP430/Inst8mm.ll
index 5709728642b4c..eec2ef6d3033e 100644
--- a/llvm/test/CodeGen/MSP430/Inst8mm.ll
+++ b/llvm/test/CodeGen/MSP430/Inst8mm.ll
@@ -8,64 +8,64 @@ target triple = "msp430-generic-generic"
 define void @mov() nounwind {
 ; CHECK-LABEL: mov:
 ; CHECK: mov.b	&bar, &foo
-        %1 = load i8, i8* @bar
-        store i8 %1, i8* @foo
+        %1 = load i8, ptr @bar
+        store i8 %1, ptr @foo
         ret void
 }
 
 define void @add() nounwind {
 ; CHECK-LABEL: add:
 ; CHECK: add.b	&bar, &foo
-	%1 = load i8, i8* @bar
-	%2 = load i8, i8* @foo
+	%1 = load i8, ptr @bar
+	%2 = load i8, ptr @foo
 	%3 = add i8 %2, %1
-	store i8 %3, i8* @foo
+	store i8 %3, ptr @foo
 	ret void
 }
 
 define void @and() nounwind {
 ; CHECK-LABEL: and:
 ; CHECK: and.b	&bar, &foo
-	%1 = load i8, i8* @bar
-	%2 = load i8, i8* @foo
+	%1 = load i8, ptr @bar
+	%2 = load i8, ptr @foo
 	%3 = and i8 %2, %1
-	store i8 %3, i8* @foo
+	store i8 %3, ptr @foo
 	ret void
 }
 
 define void @bis() nounwind {
 ; CHECK-LABEL: bis:
 ; CHECK: bis.b	&bar, &foo
-	%1 = load i8, i8* @bar
-	%2 = load i8, i8* @foo
+	%1 = load i8, ptr @bar
+	%2 = load i8, ptr @foo
 	%3 = or i8 %2, %1
-	store i8 %3, i8* @foo
+	store i8 %3, ptr @foo
 	ret void
 }
 
 define void @xor() nounwind {
 ; CHECK-LABEL: xor:
 ; CHECK: xor.b	&bar, &foo
-	%1 = load i8, i8* @bar
-	%2 = load i8, i8* @foo
+	%1 = load i8, ptr @bar
+	%2 = load i8, ptr @foo
 	%3 = xor i8 %2, %1
-	store i8 %3, i8* @foo
+	store i8 %3, ptr @foo
 	ret void
 }
 
-define void @cmp(i8* %g, i8* %i) {
+define void @cmp(ptr %g, ptr %i) {
 entry:
 ; CHECK-LABEL: cmp:
 ; CHECK: cmp.b 4(r12), 2(r13)
-  %add.ptr = getelementptr inbounds i8, i8* %g, i16 4
-  %0 = load i8, i8* %add.ptr, align 1
-  %add.ptr1 = getelementptr inbounds i8, i8* %i, i16 2
-  %1 = load i8, i8* %add.ptr1, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %g, i16 4
+  %0 = load i8, ptr %add.ptr, align 1
+  %add.ptr1 = getelementptr inbounds i8, ptr %i, i16 2
+  %1 = load i8, ptr %add.ptr1, align 1
   %cmp = icmp sgt i8 %0, %1
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:                                          ; preds = %entry
-  store i8 0, i8* %g, align 2
+  store i8 0, ptr %g, align 2
   br label %if.end
 
 if.end:                                           ; preds = %if.then, %entry

diff  --git a/llvm/test/CodeGen/MSP430/Inst8mr.ll b/llvm/test/CodeGen/MSP430/Inst8mr.ll
index 7fbdff257fe7c..bad7c9cbe3c3a 100644
--- a/llvm/test/CodeGen/MSP430/Inst8mr.ll
+++ b/llvm/test/CodeGen/MSP430/Inst8mr.ll
@@ -6,34 +6,34 @@ target triple = "msp430-generic-generic"
 define void @mov(i8 %a) nounwind {
 ; CHECK-LABEL: mov:
 ; CHECK: mov.b	r12, &foo
-	store i8 %a, i8* @foo
+	store i8 %a, ptr @foo
 	ret void
 }
 
 define void @and(i8 %a) nounwind {
 ; CHECK-LABEL: and:
 ; CHECK: and.b	r12, &foo
-	%1 = load i8, i8* @foo
+	%1 = load i8, ptr @foo
 	%2 = and i8 %a, %1
-	store i8 %2, i8* @foo
+	store i8 %2, ptr @foo
 	ret void
 }
 
 define void @add(i8 %a) nounwind {
 ; CHECK-LABEL: add:
 ; CHECK: add.b	r12, &foo
-	%1 = load i8, i8* @foo
+	%1 = load i8, ptr @foo
 	%2 = add i8 %a, %1
-	store i8 %2, i8* @foo
+	store i8 %2, ptr @foo
 	ret void
 }
 
 define void @bis(i8 %a) nounwind {
 ; CHECK-LABEL: bis:
 ; CHECK: bis.b	r12, &foo
-	%1 = load i8, i8* @foo
+	%1 = load i8, ptr @foo
 	%2 = or i8 %a, %1
-	store i8 %2, i8* @foo
+	store i8 %2, ptr @foo
 	ret void
 }
 
@@ -41,18 +41,18 @@ define void @bic(i8 zeroext %m) nounwind {
 ; CHECK-LABEL: bic:
 ; CHECK: bic.b   r12, &foo
         %1 = xor i8 %m, -1
-        %2 = load i8, i8* @foo
+        %2 = load i8, ptr @foo
         %3 = and i8 %2, %1
-        store i8 %3, i8* @foo
+        store i8 %3, ptr @foo
         ret void
 }
 
 define void @xor(i8 %a) nounwind {
 ; CHECK-LABEL: xor:
 ; CHECK: xor.b	r12, &foo
-	%1 = load i8, i8* @foo
+	%1 = load i8, ptr @foo
 	%2 = xor i8 %a, %1
-	store i8 %2, i8* @foo
+	store i8 %2, ptr @foo
 	ret void
 }
 

diff  --git a/llvm/test/CodeGen/MSP430/Inst8rm.ll b/llvm/test/CodeGen/MSP430/Inst8rm.ll
index 826a3c65ec949..95f0835026b64 100644
--- a/llvm/test/CodeGen/MSP430/Inst8rm.ll
+++ b/llvm/test/CodeGen/MSP430/Inst8rm.ll
@@ -6,7 +6,7 @@ target triple = "msp430-generic-generic"
 define i8 @add(i8 %a) nounwind {
 ; CHECK-LABEL: add:
 ; CHECK: add.b	&foo, r12
-	%1 = load i8, i8* @foo
+	%1 = load i8, ptr @foo
 	%2 = add i8 %a, %1
 	ret i8 %2
 }
@@ -14,7 +14,7 @@ define i8 @add(i8 %a) nounwind {
 define i8 @and(i8 %a) nounwind {
 ; CHECK-LABEL: and:
 ; CHECK: and.b	&foo, r12
-	%1 = load i8, i8* @foo
+	%1 = load i8, ptr @foo
 	%2 = and i8 %a, %1
 	ret i8 %2
 }
@@ -22,7 +22,7 @@ define i8 @and(i8 %a) nounwind {
 define i8 @bis(i8 %a) nounwind {
 ; CHECK-LABEL: bis:
 ; CHECK: bis.b	&foo, r12
-	%1 = load i8, i8* @foo
+	%1 = load i8, ptr @foo
 	%2 = or i8 %a, %1
 	ret i8 %2
 }
@@ -30,7 +30,7 @@ define i8 @bis(i8 %a) nounwind {
 define i8  @bic(i8 %a) nounwind {
 ; CHECK-LABEL: bic:
 ; CHECK: bic.b  &foo, r12
-        %1 = load i8, i8* @foo
+        %1 = load i8, ptr @foo
         %2 = xor i8 %1, -1
         %3 = and i8 %a, %2
         ret i8 %3
@@ -39,7 +39,7 @@ define i8  @bic(i8 %a) nounwind {
 define i8 @xor(i8 %a) nounwind {
 ; CHECK-LABEL: xor:
 ; CHECK: xor.b	&foo, r12
-	%1 = load i8, i8* @foo
+	%1 = load i8, ptr @foo
 	%2 = xor i8 %a, %1
 	ret i8 %2
 }

diff  --git a/llvm/test/CodeGen/MSP430/InstII.ll b/llvm/test/CodeGen/MSP430/InstII.ll
index 596d5b045beb5..abb1dd00b2ab5 100644
--- a/llvm/test/CodeGen/MSP430/InstII.ll
+++ b/llvm/test/CodeGen/MSP430/InstII.ll
@@ -2,67 +2,67 @@
 target datalayout = "e-p:16:8:8-i8:8:8-i16:8:8-i32:8:8"
 target triple = "msp430-generic-generic"
 
-define void @rra8m(i8* %i) {
+define void @rra8m(ptr %i) {
 entry:
 ; CHECK-LABEL: rra8m:
 ; CHECK: rra.b 2(r12)
-  %0 = getelementptr inbounds i8, i8* %i, i16 2
-  %1 = load i8, i8* %0, align 1
+  %0 = getelementptr inbounds i8, ptr %i, i16 2
+  %1 = load i8, ptr %0, align 1
   %shr = ashr i8 %1, 1
-  store i8 %shr, i8* %0, align 1
+  store i8 %shr, ptr %0, align 1
   ret void
 }
 
-define void @rra16m(i16* %i) {
+define void @rra16m(ptr %i) {
 entry:
 ; CHECK-LABEL: rra16m:
 ; CHECK: rra 4(r12)
-  %0 = getelementptr inbounds i16, i16* %i, i16 2
-  %1 = load i16, i16* %0, align 2
+  %0 = getelementptr inbounds i16, ptr %i, i16 2
+  %1 = load i16, ptr %0, align 2
   %shr = ashr i16 %1, 1
-  store i16 %shr, i16* %0, align 2
+  store i16 %shr, ptr %0, align 2
   ret void
 }
 
 ; TODO: `clrc; rrc.b 2(r12)` is expected
-define void @rrc8m(i8* %g) {
+define void @rrc8m(ptr %g) {
 entry:
 ; CHECK-LABEL: rrc8m:
 ; CHECK: mov.b 2(r12), r13
 ; CHECK: clrc
 ; CHECK: rrc.b r13
 ; CHECK: mov.b r13, 2(r12)
-  %add.ptr = getelementptr inbounds i8, i8* %g, i16 2
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %g, i16 2
+  %0 = load i8, ptr %add.ptr, align 1
   %1 = lshr i8 %0, 1
-  store i8 %1, i8* %add.ptr, align 1
+  store i8 %1, ptr %add.ptr, align 1
   ret void
 }
 
 ; TODO: `clrc; rrc 4(r12)` is expected
-define void @rrc16m(i16* %g) {
+define void @rrc16m(ptr %g) {
 entry:
 ; CHECK-LABEL: rrc16m:
 ; CHECK: mov 4(r12), r13
 ; CHECK: clrc
 ; CHECK: rrc r13
 ; CHECK: mov r13, 4(r12)
-  %add.ptr = getelementptr inbounds i16, i16* %g, i16 2
-  %0 = load i16, i16* %add.ptr, align 2
+  %add.ptr = getelementptr inbounds i16, ptr %g, i16 2
+  %0 = load i16, ptr %add.ptr, align 2
   %shr = lshr i16 %0, 1
-  store i16 %shr, i16* %add.ptr, align 2
+  store i16 %shr, ptr %add.ptr, align 2
   ret void
 }
 
-define void @sxt16m(i16* %x) {
+define void @sxt16m(ptr %x) {
 entry:
 ; CHECK-LABEL: sxt16m:
 ; CHECK: sxt 4(r12)
-  %add.ptr = getelementptr inbounds i16, i16* %x, i16 2
-  %0 = bitcast i16* %add.ptr to i8*
-  %1 = load i8, i8* %0, align 1
+  %add.ptr = getelementptr inbounds i16, ptr %x, i16 2
+  %0 = bitcast ptr %add.ptr to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = sext i8 %1 to i16
-  store i16 %conv, i16* %add.ptr, align 2
+  store i16 %conv, ptr %add.ptr, align 2
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/MSP430/bit.ll b/llvm/test/CodeGen/MSP430/bit.ll
index a4b781243b4aa..a67fcdfe5f96c 100644
--- a/llvm/test/CodeGen/MSP430/bit.ll
+++ b/llvm/test/CodeGen/MSP430/bit.ll
@@ -33,7 +33,7 @@ define i8 @bitbir(i8 %a) nounwind {
 ; CHECK: bit.b	#15, r12
 
 define i8 @bitbmi() nounwind {
-	%t1 = load i8, i8* @foo8
+	%t1 = load i8, ptr @foo8
 	%t2 = and i8 %t1, 15
 	%t3 = icmp ne i8 %t2, 0
 	%t4 = zext i1 %t3 to i8
@@ -43,7 +43,7 @@ define i8 @bitbmi() nounwind {
 ; CHECK: bit.b	#15, &foo8
 
 define i8 @bitbim() nounwind {
-	%t1 = load i8, i8* @foo8
+	%t1 = load i8, ptr @foo8
 	%t2 = and i8 15, %t1
 	%t3 = icmp ne i8 %t2, 0
 	%t4 = zext i1 %t3 to i8
@@ -53,7 +53,7 @@ define i8 @bitbim() nounwind {
 ; CHECK: bit.b	#15, &foo8
 
 define i8 @bitbrm(i8 %a) nounwind {
-	%t1 = load i8, i8* @foo8
+	%t1 = load i8, ptr @foo8
 	%t2 = and i8 %a, %t1
 	%t3 = icmp ne i8 %t2, 0
 	%t4 = zext i1 %t3 to i8
@@ -63,7 +63,7 @@ define i8 @bitbrm(i8 %a) nounwind {
 ; CHECK: bit.b	&foo8, r12
 
 define i8 @bitbmr(i8 %a) nounwind {
-	%t1 = load i8, i8* @foo8
+	%t1 = load i8, ptr @foo8
 	%t2 = and i8 %t1, %a
 	%t3 = icmp ne i8 %t2, 0
 	%t4 = zext i1 %t3 to i8
@@ -73,8 +73,8 @@ define i8 @bitbmr(i8 %a) nounwind {
 ; CHECK: bit.b	r12, &foo8
 
 define i8 @bitbmm() nounwind {
-	%t1 = load i8, i8* @foo8
-	%t2 = load i8, i8* @bar8
+	%t1 = load i8, ptr @foo8
+	%t2 = load i8, ptr @bar8
 	%t3 = and i8 %t1, %t2
 	%t4 = icmp ne i8 %t3, 0
 	%t5 = zext i1 %t4 to i8
@@ -114,7 +114,7 @@ define i16 @bitwir(i16 %a) nounwind {
 ; CHECK: bit	#4080, r12
 
 define i16 @bitwmi() nounwind {
-	%t1 = load i16, i16* @foo16
+	%t1 = load i16, ptr @foo16
 	%t2 = and i16 %t1, 4080
 	%t3 = icmp ne i16 %t2, 0
 	%t4 = zext i1 %t3 to i16
@@ -124,7 +124,7 @@ define i16 @bitwmi() nounwind {
 ; CHECK: bit	#4080, &foo16
 
 define i16 @bitwim() nounwind {
-	%t1 = load i16, i16* @foo16
+	%t1 = load i16, ptr @foo16
 	%t2 = and i16 4080, %t1
 	%t3 = icmp ne i16 %t2, 0
 	%t4 = zext i1 %t3 to i16
@@ -134,7 +134,7 @@ define i16 @bitwim() nounwind {
 ; CHECK: bit	#4080, &foo16
 
 define i16 @bitwrm(i16 %a) nounwind {
-	%t1 = load i16, i16* @foo16
+	%t1 = load i16, ptr @foo16
 	%t2 = and i16 %a, %t1
 	%t3 = icmp ne i16 %t2, 0
 	%t4 = zext i1 %t3 to i16
@@ -144,7 +144,7 @@ define i16 @bitwrm(i16 %a) nounwind {
 ; CHECK: bit	&foo16, r12
 
 define i16 @bitwmr(i16 %a) nounwind {
-	%t1 = load i16, i16* @foo16
+	%t1 = load i16, ptr @foo16
 	%t2 = and i16 %t1, %a
 	%t3 = icmp ne i16 %t2, 0
 	%t4 = zext i1 %t3 to i16
@@ -154,8 +154,8 @@ define i16 @bitwmr(i16 %a) nounwind {
 ; CHECK: bit	r12, &foo16
 
 define i16 @bitwmm() nounwind {
-	%t1 = load i16, i16* @foo16
-	%t2 = load i16, i16* @bar16
+	%t1 = load i16, ptr @foo16
+	%t2 = load i16, ptr @bar16
 	%t3 = and i16 %t1, %t2
 	%t4 = icmp ne i16 %t3, 0
 	%t5 = zext i1 %t4 to i16

diff  --git a/llvm/test/CodeGen/MSP430/byval.ll b/llvm/test/CodeGen/MSP430/byval.ll
index 5faeba5f7c6e0..ca9c8a7578be3 100644
--- a/llvm/test/CodeGen/MSP430/byval.ll
+++ b/llvm/test/CodeGen/MSP430/byval.ll
@@ -6,12 +6,12 @@ target triple = "msp430---elf"
 %struct.Foo = type { i16, i16, i16 }
 @foo = global %struct.Foo { i16 1, i16 2, i16 3 }, align 2
 
-define i16 @callee(%struct.Foo* byval(%struct.Foo) %f) nounwind {
+define i16 @callee(ptr byval(%struct.Foo) %f) nounwind {
 entry:
 ; CHECK-LABEL: callee:
 ; CHECK: mov 2(r1), r12
-  %0 = getelementptr inbounds %struct.Foo, %struct.Foo* %f, i32 0, i32 0
-  %1 = load i16, i16* %0, align 2
+  %0 = getelementptr inbounds %struct.Foo, ptr %f, i32 0, i32 0
+  %1 = load i16, ptr %0, align 2
   ret i16 %1
 }
 
@@ -21,6 +21,6 @@ entry:
 ; CHECK: mov &foo+4, 4(r1)
 ; CHECK-NEXT: mov &foo+2, 2(r1)
 ; CHECK-NEXT: mov &foo, 0(r1)
-  %call = call i16 @callee(%struct.Foo* byval(%struct.Foo) @foo)
+  %call = call i16 @callee(ptr byval(%struct.Foo) @foo)
   ret void
 }

diff  --git a/llvm/test/CodeGen/MSP430/callee-saved.ll b/llvm/test/CodeGen/MSP430/callee-saved.ll
index 4f323298dd42e..5395f30bc9afb 100644
--- a/llvm/test/CodeGen/MSP430/callee-saved.ll
+++ b/llvm/test/CodeGen/MSP430/callee-saved.ll
@@ -37,18 +37,18 @@ define void @foo() {
 ; CHECK: .cfi_offset r9, -14
 ; CHECK: .cfi_offset r10, -16
 
-  %t1 = load volatile float, float* @g
-  %t2 = load volatile float, float* @g
-  %t3 = load volatile float, float* @g
-  %t4 = load volatile float, float* @g
-  %t5 = load volatile float, float* @g
-  %t6 = load volatile float, float* @g
-  %t7 = load volatile float, float* @g
-  store volatile float %t1, float* @g
-  store volatile float %t2, float* @g
-  store volatile float %t3, float* @g
-  store volatile float %t4, float* @g
-  store volatile float %t5, float* @g
-  store volatile float %t6, float* @g
+  %t1 = load volatile float, ptr @g
+  %t2 = load volatile float, ptr @g
+  %t3 = load volatile float, ptr @g
+  %t4 = load volatile float, ptr @g
+  %t5 = load volatile float, ptr @g
+  %t6 = load volatile float, ptr @g
+  %t7 = load volatile float, ptr @g
+  store volatile float %t1, ptr @g
+  store volatile float %t2, ptr @g
+  store volatile float %t3, ptr @g
+  store volatile float %t4, ptr @g
+  store volatile float %t5, ptr @g
+  store volatile float %t6, ptr @g
   ret void
 }

diff  --git a/llvm/test/CodeGen/MSP430/calls.ll b/llvm/test/CodeGen/MSP430/calls.ll
index c5540bf1f9991..4d3b7e328a84e 100644
--- a/llvm/test/CodeGen/MSP430/calls.ll
+++ b/llvm/test/CodeGen/MSP430/calls.ll
@@ -12,7 +12,7 @@ define i32 @test_direct(i32 %a) nounwind {
   ret i32 %1
 }
 
-define i16 @test_indirect(i16 (i16)* %a, i16 %b) nounwind {
+define i16 @test_indirect(ptr %a, i16 %b) nounwind {
 ; CHECK-LABEL: test_indirect:
 ; CHECK: mov	r12, r14
 ; CHECK: mov	r13, r12

diff  --git a/llvm/test/CodeGen/MSP430/cc_args.ll b/llvm/test/CodeGen/MSP430/cc_args.ll
index 2bd29d727b40d..aba38d79470e9 100644
--- a/llvm/test/CodeGen/MSP430/cc_args.ll
+++ b/llvm/test/CodeGen/MSP430/cc_args.ll
@@ -87,7 +87,7 @@ entry:
 define void @f_i16(i16 %a) #0 {
 ; CHECK: f_i16:
 ; CHECK: mov r12, &g_i16
-  store volatile i16 %a, i16* @g_i16, align 2
+  store volatile i16 %a, ptr @g_i16, align 2
   ret void
 }
 
@@ -95,7 +95,7 @@ define void @f_i32(i32 %a) #0 {
 ; CHECK: f_i32:
 ; CHECK: mov r13, &g_i32+2
 ; CHECK: mov r12, &g_i32
-  store volatile i32 %a, i32* @g_i32, align 2
+  store volatile i32 %a, ptr @g_i32, align 2
   ret void
 }
 
@@ -105,7 +105,7 @@ define void @f_i64(i64 %a) #0 {
 ; CHECK: mov r14, &g_i64+4
 ; CHECK: mov r13, &g_i64+2
 ; CHECK: mov r12, &g_i64
-  store volatile i64 %a, i64* @g_i64, align 2
+  store volatile i64 %a, ptr @g_i64, align 2
   ret void
 }
 
@@ -113,49 +113,49 @@ define void @f_i32_i32(i32 %a, i32 %b) #0 {
 ; CHECK: f_i32_i32:
 ; CHECK: mov r13, &g_i32+2
 ; CHECK: mov r12, &g_i32
-  store volatile i32 %a, i32* @g_i32, align 2
+  store volatile i32 %a, ptr @g_i32, align 2
 ; CHECK: mov r15, &g_i32+2
 ; CHECK: mov r14, &g_i32
-  store volatile i32 %b, i32* @g_i32, align 2
+  store volatile i32 %b, ptr @g_i32, align 2
   ret void
 }
 
 define void @f_i16_i32_i32(i16 %a, i32 %b, i32 %c) #0 {
 ; CHECK: f_i16_i32_i32:
 ; CHECK: mov r12, &g_i16
-  store volatile i16 %a, i16* @g_i16, align 2
+  store volatile i16 %a, ptr @g_i16, align 2
 ; CHECK: mov r14, &g_i32+2
 ; CHECK: mov r13, &g_i32
-  store volatile i32 %b, i32* @g_i32, align 2
+  store volatile i32 %b, ptr @g_i32, align 2
 ; CHECK: mov r15, &g_i32
 ; CHECK: mov 4(r4), &g_i32+2
-  store volatile i32 %c, i32* @g_i32, align 2
+  store volatile i32 %c, ptr @g_i32, align 2
   ret void
 }
 
 define void @f_i16_i32_i16(i16 %a, i32 %b, i16 %c) #0 {
 ; CHECK: f_i16_i32_i16:
 ; CHECK: mov r12, &g_i16
-  store volatile i16 %a, i16* @g_i16, align 2
+  store volatile i16 %a, ptr @g_i16, align 2
 ; CHECK: mov r14, &g_i32+2
 ; CHECK: mov r13, &g_i32
-  store volatile i32 %b, i32* @g_i32, align 2
+  store volatile i32 %b, ptr @g_i32, align 2
 ; CHECK: mov r15, &g_i16
-  store volatile i16 %c, i16* @g_i16, align 2
+  store volatile i16 %c, ptr @g_i16, align 2
   ret void
 }
 
 define void @f_i16_i64_i16(i16 %a, i64 %b, i16 %c) #0 {
 ; CHECK: f_i16_i64_i16:
 ; CHECK: mov r12, &g_i16
-  store volatile i16 %a, i16* @g_i16, align 2
+  store volatile i16 %a, ptr @g_i16, align 2
 ;CHECK: mov 10(r4), &g_i64+6
 ;CHECK: mov 8(r4), &g_i64+4
 ;CHECK: mov 6(r4), &g_i64+2
 ;CHECK: mov 4(r4), &g_i64
-  store volatile i64 %b, i64* @g_i64, align 2
+  store volatile i64 %b, ptr @g_i64, align 2
 ;CHECK: mov r13, &g_i16
-  store volatile i16 %c, i16* @g_i16, align 2
+  store volatile i16 %c, ptr @g_i16, align 2
   ret void
 }
 
@@ -165,30 +165,30 @@ define void @f_i64_i64(i64 %a, i64 %b) #0 {
 ; CHECK: mov	r14, &g_i64+4
 ; CHECK: mov	r13, &g_i64+2
 ; CHECK: mov	r12, &g_i64
-  store volatile i64 %a, i64* @g_i64, align 2
+  store volatile i64 %a, ptr @g_i64, align 2
 ; CHECK-DAG: mov	10(r4), &g_i64+6
 ; CHECK-DAG: mov	8(r4), &g_i64+4
 ; CHECK-DAG: mov	6(r4), &g_i64+2
 ; CHECK-DAG: mov	4(r4), &g_i64
-  store volatile i64 %b, i64* @g_i64, align 2
+  store volatile i64 %b, ptr @g_i64, align 2
   ret void
 }
 
 define void @f_i16_i64_i32_i32(i16 %a, i64 %b, i32 %c, i32 %d) #0 {
 ; CHECK-LABEL: f_i16_i64_i32_i32:
 ; CHECK: mov	r12, &g_i16
-  store volatile i16 %a, i16* @g_i16, align 2
+  store volatile i16 %a, ptr @g_i16, align 2
 ; CHECK: mov	10(r4), &g_i64+6
 ; CHECK: mov	8(r4), &g_i64+4
 ; CHECK: mov	6(r4), &g_i64+2
 ; CHECK: mov	4(r4), &g_i64
-  store volatile i64 %b, i64* @g_i64, align 2
+  store volatile i64 %b, ptr @g_i64, align 2
 ; CHECK: mov	r14, &g_i32+2
 ; CHECK: mov	r13, &g_i32
-  store volatile i32 %c, i32* @g_i32, align 2
+  store volatile i32 %c, ptr @g_i32, align 2
 ; CHECK: mov	14(r4), &g_i32+2
 ; CHECK: mov	12(r4), &g_i32
-  store volatile i32 %d, i32* @g_i32, align 2
+  store volatile i32 %d, ptr @g_i32, align 2
   ret void
 }
 ; MSP430 EABI p. 6.3
@@ -199,8 +199,8 @@ define void @f_i16_i64_i32_i32(i16 %a, i64 %b, i32 %c, i32 %d) #0 {
 @g_i64_2 = common global i64 0, align 2
 
 define i64 @helper_call_i64() #0 {
-  %1 = load i64, i64* @g_i64, align 2
-  %2 = load i64, i64* @g_i64_2, align 2
+  %1 = load i64, ptr @g_i64, align 2
+  %2 = load i64, ptr @g_i64_2, align 2
 ; CHECK-LABEL: helper_call_i64:
 ; CHECK: mov	&g_i64, r8
 ; CHECK: mov	&g_i64+2, r9

diff  --git a/llvm/test/CodeGen/MSP430/cc_ret.ll b/llvm/test/CodeGen/MSP430/cc_ret.ll
index e5bd61941bc7e..31cca30652d96 100644
--- a/llvm/test/CodeGen/MSP430/cc_ret.ll
+++ b/llvm/test/CodeGen/MSP430/cc_ret.ll
@@ -10,13 +10,13 @@ entry:
 ; CHECK: call #f_i16
 ; CHECK: mov r12, &g_i16
   %0 = call i16 @f_i16()
-  store volatile i16 %0, i16* @g_i16
+  store volatile i16 %0, ptr @g_i16
 
 ; CHECK: call #f_i32
 ; CHECK: mov r13, &g_i32+2
 ; CHECK: mov r12, &g_i32
   %1 = call i32 @f_i32()
-  store volatile i32 %1, i32* @g_i32
+  store volatile i32 %1, ptr @g_i32
 
 ; CHECK: call #f_i64
 ; CHECK: mov r15, &g_i64+6
@@ -24,7 +24,7 @@ entry:
 ; CHECK: mov r13, &g_i64+2
 ; CHECK: mov r12, &g_i64
   %2 = call i64 @f_i64()
-  store volatile i64 %2, i64* @g_i64
+  store volatile i64 %2, ptr @g_i64
 
   ret void
 }

diff  --git a/llvm/test/CodeGen/MSP430/fp.ll b/llvm/test/CodeGen/MSP430/fp.ll
index bf603704a91b6..bb519fd1778b0 100644
--- a/llvm/test/CodeGen/MSP430/fp.ll
+++ b/llvm/test/CodeGen/MSP430/fp.ll
@@ -11,7 +11,7 @@ entry:
 ; CHECK: sub #2, r1
   %i = alloca i16, align 2
 ; CHECK: clr -2(r4)
-  store i16 0, i16* %i, align 2
+  store i16 0, ptr %i, align 2
 ; CHECK: pop r4
   ret void
 }

diff  --git a/llvm/test/CodeGen/MSP430/hwmult16.ll b/llvm/test/CodeGen/MSP430/hwmult16.ll
index 87b6a7aeacf5c..01836349cc2d3 100644
--- a/llvm/test/CodeGen/MSP430/hwmult16.ll
+++ b/llvm/test/CodeGen/MSP430/hwmult16.ll
@@ -13,7 +13,7 @@ entry:
 ; CHECK: mpyi:
 
 ; CHECK: call #__mspabi_mpyi_hw
-  %0 = load volatile i16, i16* @g_i16, align 8
+  %0 = load volatile i16, ptr @g_i16, align 8
   %1 = mul i16 %0, %0
 
   ret i16 %1
@@ -24,7 +24,7 @@ entry:
 ; CHECK: mpyli:
 
 ; CHECK: call #__mspabi_mpyl_hw
-  %0 = load volatile i32, i32* @g_i32, align 8
+  %0 = load volatile i32, ptr @g_i32, align 8
   %1 = mul i32 %0, %0
 
   ret i32 %1
@@ -35,7 +35,7 @@ entry:
 ; CHECK: mpylli:
 
 ; CHECK: call #__mspabi_mpyll_hw
-  %0 = load volatile i64, i64* @g_i64, align 8
+  %0 = load volatile i64, ptr @g_i64, align 8
   %1 = mul i64 %0, %0
 
   ret i64 %1

diff  --git a/llvm/test/CodeGen/MSP430/hwmult32.ll b/llvm/test/CodeGen/MSP430/hwmult32.ll
index 10c831e77ffbd..56aec5766ed47 100644
--- a/llvm/test/CodeGen/MSP430/hwmult32.ll
+++ b/llvm/test/CodeGen/MSP430/hwmult32.ll
@@ -13,7 +13,7 @@ entry:
 ; CHECK: mpyi:
 
 ; CHECK: call #__mspabi_mpyi_hw
-  %0 = load volatile i16, i16* @g_i16, align 8
+  %0 = load volatile i16, ptr @g_i16, align 8
   %1 = mul i16 %0, %0
 
   ret i16 %1
@@ -24,7 +24,7 @@ entry:
 ; CHECK: mpyli:
 
 ; CHECK: call #__mspabi_mpyl_hw32
-  %0 = load volatile i32, i32* @g_i32, align 8
+  %0 = load volatile i32, ptr @g_i32, align 8
   %1 = mul i32 %0, %0
 
   ret i32 %1
@@ -35,7 +35,7 @@ entry:
 ; CHECK: mpylli:
 
 ; CHECK: call #__mspabi_mpyll_hw32
-  %0 = load volatile i64, i64* @g_i64, align 8
+  %0 = load volatile i64, ptr @g_i64, align 8
   %1 = mul i64 %0, %0
 
   ret i64 %1

diff  --git a/llvm/test/CodeGen/MSP430/hwmultf5.ll b/llvm/test/CodeGen/MSP430/hwmultf5.ll
index c57922ece7d0c..9ec754821dc88 100644
--- a/llvm/test/CodeGen/MSP430/hwmultf5.ll
+++ b/llvm/test/CodeGen/MSP430/hwmultf5.ll
@@ -13,7 +13,7 @@ entry:
 ; CHECK: mpyi:
 
 ; CHECK: call #__mspabi_mpyi_f5hw
-  %0 = load volatile i16, i16* @g_i16, align 8
+  %0 = load volatile i16, ptr @g_i16, align 8
   %1 = mul i16 %0, %0
 
   ret i16 %1
@@ -24,7 +24,7 @@ entry:
 ; CHECK: mpyli:
 
 ; CHECK: call #__mspabi_mpyl_f5hw
-  %0 = load volatile i32, i32* @g_i32, align 8
+  %0 = load volatile i32, ptr @g_i32, align 8
   %1 = mul i32 %0, %0
 
   ret i32 %1
@@ -35,7 +35,7 @@ entry:
 ; CHECK: mpylli:
 
 ; CHECK: call #__mspabi_mpyll_f5hw
-  %0 = load volatile i64, i64* @g_i64, align 8
+  %0 = load volatile i64, ptr @g_i64, align 8
   %1 = mul i64 %0, %0
 
   ret i64 %1

diff  --git a/llvm/test/CodeGen/MSP430/indirectbr.ll b/llvm/test/CodeGen/MSP430/indirectbr.ll
index af1a466b3c783..9606419867709 100644
--- a/llvm/test/CodeGen/MSP430/indirectbr.ll
+++ b/llvm/test/CodeGen/MSP430/indirectbr.ll
@@ -1,21 +1,21 @@
 ; RUN: llc -march=msp430 < %s
 
-@nextaddr = global i8* null                       ; <i8**> [#uses=2]
-@C.0.2070 = private constant [5 x i8*] [i8* blockaddress(@foo, %L1), i8* blockaddress(@foo, %L2), i8* blockaddress(@foo, %L3), i8* blockaddress(@foo, %L4), i8* blockaddress(@foo, %L5)] ; <[5 x i8*]*> [#uses=1]
+@nextaddr = global ptr null                       ; <i8**> [#uses=2]
+@C.0.2070 = private constant [5 x ptr] [ptr blockaddress(@foo, %L1), ptr blockaddress(@foo, %L2), ptr blockaddress(@foo, %L3), ptr blockaddress(@foo, %L4), ptr blockaddress(@foo, %L5)] ; <[5 x i8*]*> [#uses=1]
 
 define internal i16 @foo(i16 %i) nounwind {
 entry:
-  %0 = load i8*, i8** @nextaddr, align 4               ; <i8*> [#uses=2]
-  %1 = icmp eq i8* %0, null                       ; <i1> [#uses=1]
+  %0 = load ptr, ptr @nextaddr, align 4               ; <i8*> [#uses=2]
+  %1 = icmp eq ptr %0, null                       ; <i1> [#uses=1]
   br i1 %1, label %bb3, label %bb2
 
 bb2:                                              ; preds = %bb3, %entry
-  %gotovar.4.0 = phi i8* [ %gotovar.4.0.pre, %bb3 ], [ %0, %entry ] ; <i8*> [#uses=1]
-  indirectbr i8* %gotovar.4.0, [label %L5, label %L4, label %L3, label %L2, label %L1]
+  %gotovar.4.0 = phi ptr [ %gotovar.4.0.pre, %bb3 ], [ %0, %entry ] ; <i8*> [#uses=1]
+  indirectbr ptr %gotovar.4.0, [label %L5, label %L4, label %L3, label %L2, label %L1]
 
 bb3:                                              ; preds = %entry
-  %2 = getelementptr inbounds [5 x i8*], [5 x i8*]* @C.0.2070, i16 0, i16 %i ; <i8**> [#uses=1]
-  %gotovar.4.0.pre = load i8*, i8** %2, align 4        ; <i8*> [#uses=1]
+  %2 = getelementptr inbounds [5 x ptr], ptr @C.0.2070, i16 0, i16 %i ; <i8**> [#uses=1]
+  %gotovar.4.0.pre = load ptr, ptr %2, align 4        ; <i8*> [#uses=1]
   br label %bb2
 
 L5:                                               ; preds = %bb2
@@ -36,6 +36,6 @@ L2:                                               ; preds = %L3, %bb2
 
 L1:                                               ; preds = %L2, %bb2
   %res.3 = phi i16 [ %phitmp, %L2 ], [ 2, %bb2 ]  ; <i16> [#uses=1]
-  store i8* blockaddress(@foo, %L5), i8** @nextaddr, align 4
+  store ptr blockaddress(@foo, %L5), ptr @nextaddr, align 4
   ret i16 %res.3
 }

diff  --git a/llvm/test/CodeGen/MSP430/indirectbr2.ll b/llvm/test/CodeGen/MSP430/indirectbr2.ll
index b0b4f1cbfd24f..954f2480adbea 100644
--- a/llvm/test/CodeGen/MSP430/indirectbr2.ll
+++ b/llvm/test/CodeGen/MSP430/indirectbr2.ll
@@ -1,12 +1,12 @@
 ; RUN: llc -march=msp430 < %s | FileCheck %s
-@C.0.2070 = private constant [5 x i8*] [i8* blockaddress(@foo, %L1), i8* blockaddress(@foo, %L2), i8* blockaddress(@foo, %L3), i8* blockaddress(@foo, %L4), i8* blockaddress(@foo, %L5)] ; <[5 x i8*]*> [#uses=1]
+@C.0.2070 = private constant [5 x ptr] [ptr blockaddress(@foo, %L1), ptr blockaddress(@foo, %L2), ptr blockaddress(@foo, %L3), ptr blockaddress(@foo, %L4), ptr blockaddress(@foo, %L5)] ; <[5 x i8*]*> [#uses=1]
 
 define internal i16 @foo(i16 %i) nounwind {
 entry:
-  %tmp1 = getelementptr inbounds [5 x i8*], [5 x i8*]* @C.0.2070, i16 0, i16 %i ; <i8**> [#uses=1]
-  %gotovar.4.0 = load i8*, i8** %tmp1, align 4        ; <i8*> [#uses=1]
+  %tmp1 = getelementptr inbounds [5 x ptr], ptr @C.0.2070, i16 0, i16 %i ; <i8**> [#uses=1]
+  %gotovar.4.0 = load ptr, ptr %tmp1, align 4        ; <i8*> [#uses=1]
 ; CHECK: br .LC.0.2070(r12)
-  indirectbr i8* %gotovar.4.0, [label %L5, label %L4, label %L3, label %L2, label %L1]
+  indirectbr ptr %gotovar.4.0, [label %L5, label %L4, label %L3, label %L2, label %L1]
 
 L5:                                               ; preds = %bb2
   br label %L4

diff  --git a/llvm/test/CodeGen/MSP430/inline-asm-absolute-addressing.ll b/llvm/test/CodeGen/MSP430/inline-asm-absolute-addressing.ll
index ee730a128b085..6aa57099ee1e9 100644
--- a/llvm/test/CodeGen/MSP430/inline-asm-absolute-addressing.ll
+++ b/llvm/test/CodeGen/MSP430/inline-asm-absolute-addressing.ll
@@ -10,6 +10,6 @@ target triple = "msp430-elf"
 define void @f() {
 entry:
 ; CHECK: mov r1, &256
-  call void asm sideeffect "mov r1, $0", "*m"(i8* elementtype(i8) inttoptr (i16 256 to i8*))
+  call void asm sideeffect "mov r1, $0", "*m"(ptr elementtype(i8) inttoptr (i16 256 to ptr))
   ret void
 }

diff  --git a/llvm/test/CodeGen/MSP430/inline-asm.ll b/llvm/test/CodeGen/MSP430/inline-asm.ll
index a2f13235b1d30..3ea3036714706 100644
--- a/llvm/test/CodeGen/MSP430/inline-asm.ll
+++ b/llvm/test/CodeGen/MSP430/inline-asm.ll
@@ -15,12 +15,12 @@ define void @reg(i16 %a) nounwind {
 @foo = global i16 0, align 2
 
 define void @immmem() nounwind {
-        call void asm sideeffect "bic\09$0,r2", "i"(i16* getelementptr(i16, i16* @foo, i32 1)) nounwind
+        call void asm sideeffect "bic\09$0,r2", "i"(ptr getelementptr(i16, ptr @foo, i32 1)) nounwind
         ret void
 }
 
 define void @mem() nounwind {
-        %fooval = load i16, i16* @foo
+        %fooval = load i16, ptr @foo
         call void asm sideeffect "bic\09$0,r2", "m"(i16 %fooval) nounwind
         ret void
 }

diff  --git a/llvm/test/CodeGen/MSP430/inlineasm-output-template.ll b/llvm/test/CodeGen/MSP430/inlineasm-output-template.ll
index 9b8ca7e58e963..e09f61d3185d8 100644
--- a/llvm/test/CodeGen/MSP430/inlineasm-output-template.ll
+++ b/llvm/test/CodeGen/MSP430/inlineasm-output-template.ll
@@ -13,7 +13,7 @@ define dso_local i32 @test_inlineasm_c_output_template0() {
 ; CHECK: ;TEST baz
 @baz = internal global i32 0, align 4
 define dso_local i32 @test_inlineasm_c_output_template2() {
-  tail call void asm sideeffect ";TEST ${0:c}", "i"(i32* nonnull @baz)
+  tail call void asm sideeffect ";TEST ${0:c}", "i"(ptr nonnull @baz)
   ret i32 42
 }
 

diff  --git a/llvm/test/CodeGen/MSP430/interrupt.ll b/llvm/test/CodeGen/MSP430/interrupt.ll
index 47e72dd1cefc8..5459b8807472c 100644
--- a/llvm/test/CodeGen/MSP430/interrupt.ll
+++ b/llvm/test/CodeGen/MSP430/interrupt.ll
@@ -3,7 +3,7 @@
 target datalayout = "e-m:e-p:16:16-i32:16-i64:16-f32:16-f64:16-a:8-n8:16-S16"
 target triple = "msp430-generic-generic"
 
-@llvm.used = appending global [1 x i8*] [i8* bitcast (void ()* @ISR to i8*)], section "llvm.metadata"
+@llvm.used = appending global [1 x ptr] [ptr @ISR], section "llvm.metadata"
 
 ; MSP430 EABI p. 3.9
 ; Interrupt functions must save all the registers that are used, even those
@@ -33,19 +33,19 @@ entry:
 ; CHECK: push r13
 ; CHECK: push r14
 ; CHECK: push r15
-  %t1 = load volatile float, float* @g
-  %t2 = load volatile float, float* @g
-  %t3 = load volatile float, float* @g
-  %t4 = load volatile float, float* @g
-  %t5 = load volatile float, float* @g
-  %t6 = load volatile float, float* @g
-  %t7 = load volatile float, float* @g
-  store volatile float %t1, float* @g
-  store volatile float %t2, float* @g
-  store volatile float %t3, float* @g
-  store volatile float %t4, float* @g
-  store volatile float %t5, float* @g
-  store volatile float %t6, float* @g
+  %t1 = load volatile float, ptr @g
+  %t2 = load volatile float, ptr @g
+  %t3 = load volatile float, ptr @g
+  %t4 = load volatile float, ptr @g
+  %t5 = load volatile float, ptr @g
+  %t6 = load volatile float, ptr @g
+  %t7 = load volatile float, ptr @g
+  store volatile float %t1, ptr @g
+  store volatile float %t2, ptr @g
+  store volatile float %t3, ptr @g
+  store volatile float %t4, ptr @g
+  store volatile float %t5, ptr @g
+  store volatile float %t6, ptr @g
 ; CHECK: reti
   ret void
 }

diff  --git a/llvm/test/CodeGen/MSP430/jumptable.ll b/llvm/test/CodeGen/MSP430/jumptable.ll
index 3e50123957726..7cd1b75c04691 100644
--- a/llvm/test/CodeGen/MSP430/jumptable.ll
+++ b/llvm/test/CodeGen/MSP430/jumptable.ll
@@ -13,8 +13,8 @@ entry:
 ; CHECK-NEXT: jhs     .LBB0_3
   %retval = alloca i16, align 2
   %i.addr = alloca i16, align 2
-  store i16 %i, i16* %i.addr, align 2
-  %0 = load i16, i16* %i.addr, align 2
+  store i16 %i, ptr %i.addr, align 2
+  %0 = load i16, ptr %i.addr, align 2
 ; CHECK:      add   r12, r12
 ; CHECK-NEXT: br .LJTI0_0(r12)
   switch i16 %0, label %sw.default [
@@ -25,27 +25,27 @@ entry:
   ]
 
 sw.bb:                                            ; preds = %entry
-  store i16 0, i16* %retval
+  store i16 0, ptr %retval
   br label %return
 
 sw.bb1:                                           ; preds = %entry
-  store i16 1, i16* %retval
+  store i16 1, ptr %retval
   br label %return
 
 sw.bb2:                                           ; preds = %entry
-  store i16 2, i16* %retval
+  store i16 2, ptr %retval
   br label %return
 
 sw.bb3:                                           ; preds = %entry
-  store i16 3, i16* %retval
+  store i16 3, ptr %retval
   br label %return
 
 sw.default:                                       ; preds = %entry
-  store i16 2, i16* %retval
+  store i16 2, ptr %retval
   br label %return
 
 return:                                           ; preds = %sw.default, %sw.bb3, %sw.bb2, %sw.bb1, %sw.bb
-  %1 = load i16, i16* %retval
+  %1 = load i16, ptr %retval
   ret i16 %1
 ; CHECK: ret
 }

diff  --git a/llvm/test/CodeGen/MSP430/libcalls.ll b/llvm/test/CodeGen/MSP430/libcalls.ll
index b6e24db6c551a..5d3755cbf9b0c 100644
--- a/llvm/test/CodeGen/MSP430/libcalls.ll
+++ b/llvm/test/CodeGen/MSP430/libcalls.ll
@@ -14,7 +14,7 @@ entry:
 ; CHECK: d2f:
 
 ; CHECK: call #__mspabi_cvtdf
-  %0 = load volatile double, double* @g_double, align 8
+  %0 = load volatile double, ptr @g_double, align 8
   %1 = fptrunc double %0 to float
 
   ret float %1
@@ -25,7 +25,7 @@ entry:
 ; CHECK: f2d:
 
 ; CHECK: call #__mspabi_cvtfd
-  %0 = load volatile float, float* @g_float, align 8
+  %0 = load volatile float, ptr @g_float, align 8
   %1 = fpext float %0 to double
 
   ret double %1
@@ -36,7 +36,7 @@ entry:
 ; CHECK: d2l:
 
 ; CHECK: call #__mspabi_fixdli
-  %0 = load volatile double, double* @g_double, align 8
+  %0 = load volatile double, ptr @g_double, align 8
   %1 = fptosi double %0 to i32
 
   ret i32 %1
@@ -47,7 +47,7 @@ entry:
 ; CHECK: d2ll:
 
 ; CHECK: call #__mspabi_fixdlli
-  %0 = load volatile double, double* @g_double, align 8
+  %0 = load volatile double, ptr @g_double, align 8
   %1 = fptosi double %0 to i64
 
   ret i64 %1
@@ -58,7 +58,7 @@ entry:
 ; CHECK: d2ul:
 
 ; CHECK: call #__mspabi_fixdul
-  %0 = load volatile double, double* @g_double, align 8
+  %0 = load volatile double, ptr @g_double, align 8
   %1 = fptoui double %0 to i32
 
   ret i32 %1
@@ -69,7 +69,7 @@ entry:
 ; CHECK: d2ull:
 
 ; CHECK: call #__mspabi_fixdull
-  %0 = load volatile double, double* @g_double, align 8
+  %0 = load volatile double, ptr @g_double, align 8
   %1 = fptoui double %0 to i64
 
   ret i64 %1
@@ -80,7 +80,7 @@ entry:
 ; CHECK: f2l:
 
 ; CHECK: call #__mspabi_fixfli
-  %0 = load volatile float, float* @g_float, align 8
+  %0 = load volatile float, ptr @g_float, align 8
   %1 = fptosi float %0 to i32
 
   ret i32 %1
@@ -91,7 +91,7 @@ entry:
 ; CHECK: f2ll:
 
 ; CHECK: call #__mspabi_fixflli
-  %0 = load volatile float, float* @g_float, align 8
+  %0 = load volatile float, ptr @g_float, align 8
   %1 = fptosi float %0 to i64
 
   ret i64 %1
@@ -102,7 +102,7 @@ entry:
 ; CHECK: f2ul:
 
 ; CHECK: call #__mspabi_fixful
-  %0 = load volatile float, float* @g_float, align 8
+  %0 = load volatile float, ptr @g_float, align 8
   %1 = fptoui float %0 to i32
 
   ret i32 %1
@@ -113,7 +113,7 @@ entry:
 ; CHECK: f2ull:
 
 ; CHECK: call #__mspabi_fixfull
-  %0 = load volatile float, float* @g_float, align 8
+  %0 = load volatile float, ptr @g_float, align 8
   %1 = fptoui float %0 to i64
 
   ret i64 %1
@@ -124,7 +124,7 @@ entry:
 ; CHECK: l2d:
 
 ; CHECK: call #__mspabi_fltlid
-  %0 = load volatile i32, i32* @g_i32, align 8
+  %0 = load volatile i32, ptr @g_i32, align 8
   %1 = sitofp i32 %0 to double
 
   ret double %1
@@ -135,7 +135,7 @@ entry:
 ; CHECK: ll2d:
 
 ; CHECK: call #__mspabi_fltllid
-  %0 = load volatile i64, i64* @g_i64, align 8
+  %0 = load volatile i64, ptr @g_i64, align 8
   %1 = sitofp i64 %0 to double
 
   ret double %1
@@ -146,7 +146,7 @@ entry:
 ; CHECK: ul2d:
 
 ; CHECK: call #__mspabi_fltuld
-  %0 = load volatile i32, i32* @g_i32, align 8
+  %0 = load volatile i32, ptr @g_i32, align 8
   %1 = uitofp i32 %0 to double
 
   ret double %1
@@ -157,7 +157,7 @@ entry:
 ; CHECK: ull2d:
 
 ; CHECK: call #__mspabi_fltulld
-  %0 = load volatile i64, i64* @g_i64, align 8
+  %0 = load volatile i64, ptr @g_i64, align 8
   %1 = uitofp i64 %0 to double
 
   ret double %1
@@ -168,7 +168,7 @@ entry:
 ; CHECK: l2f:
 
 ; CHECK: call #__mspabi_fltlif
-  %0 = load volatile i32, i32* @g_i32, align 8
+  %0 = load volatile i32, ptr @g_i32, align 8
   %1 = sitofp i32 %0 to float
 
   ret float %1
@@ -179,7 +179,7 @@ entry:
 ; CHECK: ll2f:
 
 ; CHECK: call #__mspabi_fltllif
-  %0 = load volatile i64, i64* @g_i64, align 8
+  %0 = load volatile i64, ptr @g_i64, align 8
   %1 = sitofp i64 %0 to float
 
   ret float %1
@@ -190,7 +190,7 @@ entry:
 ; CHECK: ul2f:
 
 ; CHECK: call #__mspabi_fltulf
-  %0 = load volatile i32, i32* @g_i32, align 8
+  %0 = load volatile i32, ptr @g_i32, align 8
   %1 = uitofp i32 %0 to float
 
   ret float %1
@@ -201,7 +201,7 @@ entry:
 ; CHECK: ull2f:
 
 ; CHECK: call #__mspabi_fltullf
-  %0 = load volatile i64, i64* @g_i64, align 8
+  %0 = load volatile i64, ptr @g_i64, align 8
   %1 = uitofp i64 %0 to float
 
   ret float %1
@@ -212,7 +212,7 @@ entry:
 ; CHECK: cmpd_oeq:
 
 ; CHECK: call #__mspabi_cmpd
-  %0 = load volatile double, double* @g_double, align 8
+  %0 = load volatile double, ptr @g_double, align 8
   %1 = fcmp oeq double %0, 123.0
 
   ret i1 %1
@@ -223,7 +223,7 @@ entry:
 ; CHECK: cmpd_une:
 
 ; CHECK: call #__mspabi_cmpd
-  %0 = load volatile double, double* @g_double, align 8
+  %0 = load volatile double, ptr @g_double, align 8
   %1 = fcmp une double %0, 123.0
 
   ret i1 %1
@@ -234,7 +234,7 @@ entry:
 ; CHECK: cmpd_oge:
 
 ; CHECK: call #__mspabi_cmpd
-  %0 = load volatile double, double* @g_double, align 8
+  %0 = load volatile double, ptr @g_double, align 8
   %1 = fcmp oge double %0, 123.0
 
   ret i1 %1
@@ -245,7 +245,7 @@ entry:
 ; CHECK: cmpd_olt:
 
 ; CHECK: call #__mspabi_cmpd
-  %0 = load volatile double, double* @g_double, align 8
+  %0 = load volatile double, ptr @g_double, align 8
   %1 = fcmp olt double %0, 123.0
 
   ret i1 %1
@@ -256,7 +256,7 @@ entry:
 ; CHECK: cmpd_ole:
 
 ; CHECK: call #__mspabi_cmpd
-  %0 = load volatile double, double* @g_double, align 8
+  %0 = load volatile double, ptr @g_double, align 8
   %1 = fcmp ole double %0, 123.0
 
   ret i1 %1
@@ -267,7 +267,7 @@ entry:
 ; CHECK: cmpd_ogt:
 
 ; CHECK: call #__mspabi_cmpd
-  %0 = load volatile double, double* @g_double, align 8
+  %0 = load volatile double, ptr @g_double, align 8
   %1 = fcmp ogt double %0, 123.0
 
   ret i1 %1
@@ -278,7 +278,7 @@ entry:
 ; CHECK: cmpf_oeq:
 
 ; CHECK: call #__mspabi_cmpf
-  %0 = load volatile float, float* @g_float, align 8
+  %0 = load volatile float, ptr @g_float, align 8
   %1 = fcmp oeq float %0, 123.0
 
   ret i1 %1
@@ -289,7 +289,7 @@ entry:
 ; CHECK: cmpf_une:
 
 ; CHECK: call #__mspabi_cmpf
-  %0 = load volatile float, float* @g_float, align 8
+  %0 = load volatile float, ptr @g_float, align 8
   %1 = fcmp une float %0, 123.0
 
   ret i1 %1
@@ -300,7 +300,7 @@ entry:
 ; CHECK: cmpf_oge:
 
 ; CHECK: call #__mspabi_cmpf
-  %0 = load volatile float, float* @g_float, align 8
+  %0 = load volatile float, ptr @g_float, align 8
   %1 = fcmp oge float %0, 123.0
 
   ret i1 %1
@@ -311,7 +311,7 @@ entry:
 ; CHECK: cmpf_olt:
 
 ; CHECK: call #__mspabi_cmpf
-  %0 = load volatile float, float* @g_float, align 8
+  %0 = load volatile float, ptr @g_float, align 8
   %1 = fcmp olt float %0, 123.0
 
   ret i1 %1
@@ -322,7 +322,7 @@ entry:
 ; CHECK: cmpf_ole:
 
 ; CHECK: call #__mspabi_cmpf
-  %0 = load volatile float, float* @g_float, align 8
+  %0 = load volatile float, ptr @g_float, align 8
   %1 = fcmp ole float %0, 123.0
 
   ret i1 %1
@@ -333,7 +333,7 @@ entry:
 ; CHECK: cmpf_ogt:
 
 ; CHECK: call #__mspabi_cmpf
-  %0 = load volatile float, float* @g_float, align 8
+  %0 = load volatile float, ptr @g_float, align 8
   %1 = fcmp ogt float %0, 123.0
 
   ret i1 %1
@@ -344,7 +344,7 @@ entry:
 ; CHECK: addd:
 
 ; CHECK: call #__mspabi_addd
-  %0 = load volatile double, double* @g_double, align 8
+  %0 = load volatile double, ptr @g_double, align 8
   %1 = fadd double %0, 123.0
 
   ret double %1
@@ -355,7 +355,7 @@ entry:
 ; CHECK: addf:
 
 ; CHECK: call #__mspabi_addf
-  %0 = load volatile float, float* @g_float, align 8
+  %0 = load volatile float, ptr @g_float, align 8
   %1 = fadd float %0, 123.0
 
   ret float %1
@@ -366,7 +366,7 @@ entry:
 ; CHECK: divd:
 
 ; CHECK: call #__mspabi_divd
-  %0 = load volatile double, double* @g_double, align 8
+  %0 = load volatile double, ptr @g_double, align 8
   %1 = fdiv double %0, 123.0
 
   ret double %1
@@ -377,7 +377,7 @@ entry:
 ; CHECK: divf:
 
 ; CHECK: call #__mspabi_divf
-  %0 = load volatile float, float* @g_float, align 8
+  %0 = load volatile float, ptr @g_float, align 8
   %1 = fdiv float %0, 123.0
 
   ret float %1
@@ -388,7 +388,7 @@ entry:
 ; CHECK: mpyd:
 
 ; CHECK: call #__mspabi_mpyd
-  %0 = load volatile double, double* @g_double, align 8
+  %0 = load volatile double, ptr @g_double, align 8
   %1 = fmul double %0, 123.0
 
   ret double %1
@@ -399,7 +399,7 @@ entry:
 ; CHECK: mpyf:
 
 ; CHECK: call #__mspabi_mpyf
-  %0 = load volatile float, float* @g_float, align 8
+  %0 = load volatile float, ptr @g_float, align 8
   %1 = fmul float %0, 123.0
 
   ret float %1
@@ -410,7 +410,7 @@ entry:
 ; CHECK: subd:
 
 ; CHECK: call #__mspabi_subd
-  %0 = load volatile double, double* @g_double, align 8
+  %0 = load volatile double, ptr @g_double, align 8
   %1 = fsub double %0, %0
 
   ret double %1
@@ -421,7 +421,7 @@ entry:
 ; CHECK: subf:
 
 ; CHECK: call #__mspabi_subf
-  %0 = load volatile float, float* @g_float, align 8
+  %0 = load volatile float, ptr @g_float, align 8
   %1 = fsub float %0, %0
 
   ret float %1
@@ -432,8 +432,8 @@ entry:
 ; CHECK: divi:
 
 ; CHECK: call #__mspabi_divi
-  %0 = load volatile i16, i16* @g_i16, align 8
-  %1 = load volatile i16, i16* @g_i16, align 8
+  %0 = load volatile i16, ptr @g_i16, align 8
+  %1 = load volatile i16, ptr @g_i16, align 8
   %2 = sdiv i16 %0, %1
 
   ret i16 %2
@@ -444,8 +444,8 @@ entry:
 ; CHECK: divli:
 
 ; CHECK: call #__mspabi_divli
-  %0 = load volatile i32, i32* @g_i32, align 8
-  %1 = load volatile i32, i32* @g_i32, align 8
+  %0 = load volatile i32, ptr @g_i32, align 8
+  %1 = load volatile i32, ptr @g_i32, align 8
   %2 = sdiv i32 %0, %1
 
   ret i32 %2
@@ -456,8 +456,8 @@ entry:
 ; CHECK: divlli:
 
 ; CHECK: call #__mspabi_divlli
-  %0 = load volatile i64, i64* @g_i64, align 8
-  %1 = load volatile i64, i64* @g_i64, align 8
+  %0 = load volatile i64, ptr @g_i64, align 8
+  %1 = load volatile i64, ptr @g_i64, align 8
   %2 = sdiv i64 %0, %1
 
   ret i64 %2
@@ -468,8 +468,8 @@ entry:
 ; CHECK: divu:
 
 ; CHECK: call #__mspabi_divu
-  %0 = load volatile i16, i16* @g_i16, align 8
-  %1 = load volatile i16, i16* @g_i16, align 8
+  %0 = load volatile i16, ptr @g_i16, align 8
+  %1 = load volatile i16, ptr @g_i16, align 8
   %2 = udiv i16 %0, %1
 
   ret i16 %2
@@ -480,8 +480,8 @@ entry:
 ; CHECK: divul:
 
 ; CHECK: call #__mspabi_divul
-  %0 = load volatile i32, i32* @g_i32, align 8
-  %1 = load volatile i32, i32* @g_i32, align 8
+  %0 = load volatile i32, ptr @g_i32, align 8
+  %1 = load volatile i32, ptr @g_i32, align 8
   %2 = udiv i32 %0, %1
 
   ret i32 %2
@@ -492,8 +492,8 @@ entry:
 ; CHECK: divull:
 
 ; CHECK: call #__mspabi_divull
-  %0 = load volatile i64, i64* @g_i64, align 8
-  %1 = load volatile i64, i64* @g_i64, align 8
+  %0 = load volatile i64, ptr @g_i64, align 8
+  %1 = load volatile i64, ptr @g_i64, align 8
   %2 = udiv i64 %0, %1
 
   ret i64 %2
@@ -504,8 +504,8 @@ entry:
 ; CHECK: remi:
 
 ; CHECK: call #__mspabi_remi
-  %0 = load volatile i16, i16* @g_i16, align 8
-  %1 = load volatile i16, i16* @g_i16, align 8
+  %0 = load volatile i16, ptr @g_i16, align 8
+  %1 = load volatile i16, ptr @g_i16, align 8
   %2 = srem i16 %0, %1
 
   ret i16 %2
@@ -516,8 +516,8 @@ entry:
 ; CHECK: remli:
 
 ; CHECK: call #__mspabi_remli
-  %0 = load volatile i32, i32* @g_i32, align 8
-  %1 = load volatile i32, i32* @g_i32, align 8
+  %0 = load volatile i32, ptr @g_i32, align 8
+  %1 = load volatile i32, ptr @g_i32, align 8
   %2 = srem i32 %0, %1
 
   ret i32 %2
@@ -528,8 +528,8 @@ entry:
 ; CHECK: remlli:
 
 ; CHECK: call #__mspabi_remlli
-  %0 = load volatile i64, i64* @g_i64, align 8
-  %1 = load volatile i64, i64* @g_i64, align 8
+  %0 = load volatile i64, ptr @g_i64, align 8
+  %1 = load volatile i64, ptr @g_i64, align 8
   %2 = srem i64 %0, %1
 
   ret i64 %2
@@ -540,8 +540,8 @@ entry:
 ; CHECK: remu:
 
 ; CHECK: call #__mspabi_remu
-  %0 = load volatile i16, i16* @g_i16, align 8
-  %1 = load volatile i16, i16* @g_i16, align 8
+  %0 = load volatile i16, ptr @g_i16, align 8
+  %1 = load volatile i16, ptr @g_i16, align 8
   %2 = urem i16 %0, %1
 
   ret i16 %2
@@ -552,8 +552,8 @@ entry:
 ; CHECK: remul:
 
 ; CHECK: call #__mspabi_remul
-  %0 = load volatile i32, i32* @g_i32, align 8
-  %1 = load volatile i32, i32* @g_i32, align 8
+  %0 = load volatile i32, ptr @g_i32, align 8
+  %1 = load volatile i32, ptr @g_i32, align 8
   %2 = urem i32 %0, %1
 
   ret i32 %2
@@ -564,8 +564,8 @@ entry:
 ; CHECK: remull:
 
 ; CHECK: call #__mspabi_remull
-  %0 = load volatile i64, i64* @g_i64, align 8
-  %1 = load volatile i64, i64* @g_i64, align 8
+  %0 = load volatile i64, ptr @g_i64, align 8
+  %1 = load volatile i64, ptr @g_i64, align 8
   %2 = urem i64 %0, %1
 
   ret i64 %2
@@ -576,7 +576,7 @@ entry:
 ; CHECK: mpyi:
 
 ; CHECK: call #__mspabi_mpyi
-  %0 = load volatile i16, i16* @g_i16, align 8
+  %0 = load volatile i16, ptr @g_i16, align 8
   %1 = mul i16 %0, %0
 
   ret i16 %1
@@ -587,7 +587,7 @@ entry:
 ; CHECK: mpyli:
 
 ; CHECK: call #__mspabi_mpyl
-  %0 = load volatile i32, i32* @g_i32, align 8
+  %0 = load volatile i32, ptr @g_i32, align 8
   %1 = mul i32 %0, %0
 
   ret i32 %1
@@ -598,7 +598,7 @@ entry:
 ; CHECK: mpylli:
 
 ; CHECK: call #__mspabi_mpyll
-  %0 = load volatile i64, i64* @g_i64, align 8
+  %0 = load volatile i64, ptr @g_i64, align 8
   %1 = mul i64 %0, %0
 
   ret i64 %1
@@ -610,8 +610,8 @@ define i32 @srll() #0 {
 entry:
 ; CHECK-LABEL: srll:
 ; CHECK: call #__mspabi_srll
-  %0 = load volatile i32, i32* @g_i32, align 2
-  %1 = load volatile i32, i32* @i, align 2
+  %0 = load volatile i32, ptr @g_i32, align 2
+  %1 = load volatile i32, ptr @i, align 2
   %shr = lshr i32 %0, %1
 
   ret i32 %shr
@@ -621,8 +621,8 @@ define i32 @sral() #0 {
 entry:
 ; CHECK-LABEL: sral:
 ; CHECK: call #__mspabi_sral
-  %0 = load volatile i32, i32* @g_i32, align 2
-  %1 = load volatile i32, i32* @i, align 2
+  %0 = load volatile i32, ptr @g_i32, align 2
+  %1 = load volatile i32, ptr @i, align 2
   %shr = ashr i32 %0, %1
 
   ret i32 %shr
@@ -632,8 +632,8 @@ define i32 @slll() #0 {
 entry:
 ; CHECK-LABEL: slll:
 ; CHECK: call #__mspabi_slll
-  %0 = load volatile i32, i32* @g_i32, align 2
-  %1 = load volatile i32, i32* @i, align 2
+  %0 = load volatile i32, ptr @g_i32, align 2
+  %1 = load volatile i32, ptr @i, align 2
   %shr = shl i32 %0, %1
 
   ret i32 %shr

diff  --git a/llvm/test/CodeGen/MSP430/memset.ll b/llvm/test/CodeGen/MSP430/memset.ll
index 0f83b6078201f..ad0b0edef69aa 100644
--- a/llvm/test/CodeGen/MSP430/memset.ll
+++ b/llvm/test/CodeGen/MSP430/memset.ll
@@ -2,21 +2,21 @@
 target datalayout = "e-p:16:16:16-i8:8:8-i16:16:16-i32:16:32-n8:16"
 target triple = "msp430---elf"
 
-@buf = external global i8*
+@buf = external global ptr
 
 ; Function Attrs: nounwind
 define void @test() nounwind {
 entry:
 ; CHECK-LABEL: test:
-  %0 = load i8*, i8** @buf, align 2
+  %0 = load ptr, ptr @buf, align 2
 ; CHECK: mov &buf, r12
 ; CHECK-NEXT: mov #5, r13
 ; CHECK-NEXT: mov #128, r14
 ; CHECK-NEXT: call #memset
-  call void @llvm.memset.p0i8.i16(i8* %0, i8 5, i16 128, i1 false)
+  call void @llvm.memset.p0.i16(ptr %0, i8 5, i16 128, i1 false)
   ret void
 }
 
 ; Function Attrs: nounwind
-declare void @llvm.memset.p0i8.i16(i8* nocapture, i8, i16, i1) nounwind
+declare void @llvm.memset.p0.i16(ptr nocapture, i8, i16, i1) nounwind
 

diff  --git a/llvm/test/CodeGen/MSP430/misched-msp430.ll b/llvm/test/CodeGen/MSP430/misched-msp430.ll
index f44f10ccd3ee9..0d3c60eaa5551 100644
--- a/llvm/test/CodeGen/MSP430/misched-msp430.ll
+++ b/llvm/test/CodeGen/MSP430/misched-msp430.ll
@@ -14,7 +14,7 @@ target datalayout = "e-p:16:16:16-i8:8:8-i16:16:16-i32:16:32-n8:16"
 ; CHECK: ret
 define void @f() {
 entry:
-  %0 = load i16, i16* @y, align 2
-  store i16 %0, i16* @x, align 2
+  %0 = load i16, ptr @y, align 2
+  store i16 %0, ptr @x, align 2
   ret void
 }

diff  --git a/llvm/test/CodeGen/MSP430/mult-alt-generic-msp430.ll b/llvm/test/CodeGen/MSP430/mult-alt-generic-msp430.ll
index 86809c5bd3d02..6655af5cc7565 100644
--- a/llvm/test/CodeGen/MSP430/mult-alt-generic-msp430.ll
+++ b/llvm/test/CodeGen/MSP430/mult-alt-generic-msp430.ll
@@ -9,7 +9,7 @@ target triple = "msp430"
 
 define void @single_m() nounwind {
 entry:
-  call void asm "foo $1,$0", "=*m,*m"(i16* elementtype(i16) @mout0, i16* elementtype(i16) @min1) nounwind
+  call void asm "foo $1,$0", "=*m,*m"(ptr elementtype(i16) @mout0, ptr elementtype(i16) @min1) nounwind
   ret void
 }
 
@@ -17,8 +17,8 @@ define void @single_o() nounwind {
 entry:
   %out0 = alloca i16, align 2
   %index = alloca i16, align 2
-  store i16 0, i16* %out0, align 2
-  store i16 1, i16* %index, align 2
+  store i16 0, ptr %out0, align 2
+  store i16 1, ptr %index, align 2
   ret void
 }
 
@@ -31,14 +31,14 @@ define void @single_lt() nounwind {
 entry:
   %out0 = alloca i16, align 2
   %in1 = alloca i16, align 2
-  store i16 0, i16* %out0, align 2
-  store i16 1, i16* %in1, align 2
-  %tmp = load i16, i16* %in1, align 2
+  store i16 0, ptr %out0, align 2
+  store i16 1, ptr %in1, align 2
+  %tmp = load i16, ptr %in1, align 2
   %0 = call i16 asm "foo $1,$0", "=r,<r"(i16 %tmp) nounwind
-  store i16 %0, i16* %out0, align 2
-  %tmp1 = load i16, i16* %in1, align 2
+  store i16 %0, ptr %out0, align 2
+  %tmp1 = load i16, ptr %in1, align 2
   %1 = call i16 asm "foo $1,$0", "=r,r<"(i16 %tmp1) nounwind
-  store i16 %1, i16* %out0, align 2
+  store i16 %1, ptr %out0, align 2
   ret void
 }
 
@@ -46,14 +46,14 @@ define void @single_gt() nounwind {
 entry:
   %out0 = alloca i16, align 2
   %in1 = alloca i16, align 2
-  store i16 0, i16* %out0, align 2
-  store i16 1, i16* %in1, align 2
-  %tmp = load i16, i16* %in1, align 2
+  store i16 0, ptr %out0, align 2
+  store i16 1, ptr %in1, align 2
+  %tmp = load i16, ptr %in1, align 2
   %0 = call i16 asm "foo $1,$0", "=r,>r"(i16 %tmp) nounwind
-  store i16 %0, i16* %out0, align 2
-  %tmp1 = load i16, i16* %in1, align 2
+  store i16 %0, ptr %out0, align 2
+  %tmp1 = load i16, ptr %in1, align 2
   %1 = call i16 asm "foo $1,$0", "=r,r>"(i16 %tmp1) nounwind
-  store i16 %1, i16* %out0, align 2
+  store i16 %1, ptr %out0, align 2
   ret void
 }
 
@@ -61,36 +61,36 @@ define void @single_r() nounwind {
 entry:
   %out0 = alloca i16, align 2
   %in1 = alloca i16, align 2
-  store i16 0, i16* %out0, align 2
-  store i16 1, i16* %in1, align 2
-  %tmp = load i16, i16* %in1, align 2
+  store i16 0, ptr %out0, align 2
+  store i16 1, ptr %in1, align 2
+  %tmp = load i16, ptr %in1, align 2
   %0 = call i16 asm "foo $1,$0", "=r,r"(i16 %tmp) nounwind
-  store i16 %0, i16* %out0, align 2
+  store i16 %0, ptr %out0, align 2
   ret void
 }
 
 define void @single_i() nounwind {
 entry:
   %out0 = alloca i16, align 2
-  store i16 0, i16* %out0, align 2
+  store i16 0, ptr %out0, align 2
   %0 = call i16 asm "foo $1,$0", "=r,i"(i16 1) nounwind
-  store i16 %0, i16* %out0, align 2
+  store i16 %0, ptr %out0, align 2
   ret void
 }
 
 define void @single_n() nounwind {
 entry:
   %out0 = alloca i16, align 2
-  store i16 0, i16* %out0, align 2
+  store i16 0, ptr %out0, align 2
   %0 = call i16 asm "foo $1,$0", "=r,n"(i16 1) nounwind
-  store i16 %0, i16* %out0, align 2
+  store i16 %0, ptr %out0, align 2
   ret void
 }
 
 define void @single_E() nounwind {
 entry:
   %out0 = alloca double, align 8
-  store double 0.000000e+000, double* %out0, align 8
+  store double 0.000000e+000, ptr %out0, align 8
 ; No lowering support.
 ;  %0 = call double asm "foo $1,$0", "=r,E"(double 1.000000e+001) nounwind
 ;  store double %0, double* %out0, align 8
@@ -100,7 +100,7 @@ entry:
 define void @single_F() nounwind {
 entry:
   %out0 = alloca double, align 8
-  store double 0.000000e+000, double* %out0, align 8
+  store double 0.000000e+000, ptr %out0, align 8
 ; No lowering support.
 ;  %0 = call double asm "foo $1,$0", "=r,F"(double 1.000000e+000) nounwind
 ;  store double %0, double* %out0, align 8
@@ -110,7 +110,7 @@ entry:
 define void @single_s() nounwind {
 entry:
   %out0 = alloca i16, align 2
-  store i16 0, i16* %out0, align 2
+  store i16 0, ptr %out0, align 2
   ret void
 }
 
@@ -118,16 +118,16 @@ define void @single_g() nounwind {
 entry:
   %out0 = alloca i16, align 2
   %in1 = alloca i16, align 2
-  store i16 0, i16* %out0, align 2
-  store i16 1, i16* %in1, align 2
-  %tmp = load i16, i16* %in1, align 2
+  store i16 0, ptr %out0, align 2
+  store i16 1, ptr %in1, align 2
+  %tmp = load i16, ptr %in1, align 2
   %0 = call i16 asm "foo $1,$0", "=r,imr"(i16 %tmp) nounwind
-  store i16 %0, i16* %out0, align 2
-  %tmp1 = load i16, i16* @min1, align 2
+  store i16 %0, ptr %out0, align 2
+  %tmp1 = load i16, ptr @min1, align 2
   %1 = call i16 asm "foo $1,$0", "=r,imr"(i16 %tmp1) nounwind
-  store i16 %1, i16* %out0, align 2
+  store i16 %1, ptr %out0, align 2
   %2 = call i16 asm "foo $1,$0", "=r,imr"(i16 1) nounwind
-  store i16 %2, i16* %out0, align 2
+  store i16 %2, ptr %out0, align 2
   ret void
 }
 
@@ -135,18 +135,18 @@ define void @single_X() nounwind {
 entry:
   %out0 = alloca i16, align 2
   %in1 = alloca i16, align 2
-  store i16 0, i16* %out0, align 2
-  store i16 1, i16* %in1, align 2
-  %tmp = load i16, i16* %in1, align 2
+  store i16 0, ptr %out0, align 2
+  store i16 1, ptr %in1, align 2
+  %tmp = load i16, ptr %in1, align 2
   %0 = call i16 asm "foo $1,$0", "=r,X"(i16 %tmp) nounwind
-  store i16 %0, i16* %out0, align 2
-  %tmp1 = load i16, i16* @min1, align 2
+  store i16 %0, ptr %out0, align 2
+  %tmp1 = load i16, ptr @min1, align 2
   %1 = call i16 asm "foo $1,$0", "=r,X"(i16 %tmp1) nounwind
-  store i16 %1, i16* %out0, align 2
+  store i16 %1, ptr %out0, align 2
   %2 = call i16 asm "foo $1,$0", "=r,X"(i16 1) nounwind
-  store i16 %2, i16* %out0, align 2
-  %3 = call i16 asm "foo $1,$0", "=r,X"(i16* getelementptr inbounds ([2 x i16], [2 x i16]* @marray, i32 0, i32 0)) nounwind
-  store i16 %3, i16* %out0, align 2
+  store i16 %2, ptr %out0, align 2
+  %3 = call i16 asm "foo $1,$0", "=r,X"(ptr @marray) nounwind
+  store i16 %3, ptr %out0, align 2
 ; No lowering support.
 ;  %4 = call i16 asm "foo $1,$0", "=r,X"(double 1.000000e+001) nounwind
 ;  store i16 %4, i16* %out0, align 2
@@ -158,16 +158,16 @@ entry:
 define void @single_p() nounwind {
 entry:
   %out0 = alloca i16, align 2
-  store i16 0, i16* %out0, align 2
-  %0 = call i16 asm "foo $1,$0", "=r,r"(i16* getelementptr inbounds ([2 x i16], [2 x i16]* @marray, i32 0, i32 0)) nounwind
-  store i16 %0, i16* %out0, align 2
+  store i16 0, ptr %out0, align 2
+  %0 = call i16 asm "foo $1,$0", "=r,r"(ptr @marray) nounwind
+  store i16 %0, ptr %out0, align 2
   ret void
 }
 
 define void @multi_m() nounwind {
 entry:
-  %tmp = load i16, i16* @min1, align 2
-  call void asm "foo $1,$0", "=*m|r,m|r"(i16* elementtype(i16) @mout0, i16 %tmp) nounwind
+  %tmp = load i16, ptr @min1, align 2
+  call void asm "foo $1,$0", "=*m|r,m|r"(ptr elementtype(i16) @mout0, i16 %tmp) nounwind
   ret void
 }
 
@@ -175,8 +175,8 @@ define void @multi_o() nounwind {
 entry:
   %out0 = alloca i16, align 2
   %index = alloca i16, align 2
-  store i16 0, i16* %out0, align 2
-  store i16 1, i16* %index, align 2
+  store i16 0, ptr %out0, align 2
+  store i16 1, ptr %index, align 2
   ret void
 }
 
@@ -189,14 +189,14 @@ define void @multi_lt() nounwind {
 entry:
   %out0 = alloca i16, align 2
   %in1 = alloca i16, align 2
-  store i16 0, i16* %out0, align 2
-  store i16 1, i16* %in1, align 2
-  %tmp = load i16, i16* %in1, align 2
+  store i16 0, ptr %out0, align 2
+  store i16 1, ptr %in1, align 2
+  %tmp = load i16, ptr %in1, align 2
   %0 = call i16 asm "foo $1,$0", "=r|r,r|<r"(i16 %tmp) nounwind
-  store i16 %0, i16* %out0, align 2
-  %tmp1 = load i16, i16* %in1, align 2
+  store i16 %0, ptr %out0, align 2
+  %tmp1 = load i16, ptr %in1, align 2
   %1 = call i16 asm "foo $1,$0", "=r|r,r|r<"(i16 %tmp1) nounwind
-  store i16 %1, i16* %out0, align 2
+  store i16 %1, ptr %out0, align 2
   ret void
 }
 
@@ -204,14 +204,14 @@ define void @multi_gt() nounwind {
 entry:
   %out0 = alloca i16, align 2
   %in1 = alloca i16, align 2
-  store i16 0, i16* %out0, align 2
-  store i16 1, i16* %in1, align 2
-  %tmp = load i16, i16* %in1, align 2
+  store i16 0, ptr %out0, align 2
+  store i16 1, ptr %in1, align 2
+  %tmp = load i16, ptr %in1, align 2
   %0 = call i16 asm "foo $1,$0", "=r|r,r|>r"(i16 %tmp) nounwind
-  store i16 %0, i16* %out0, align 2
-  %tmp1 = load i16, i16* %in1, align 2
+  store i16 %0, ptr %out0, align 2
+  %tmp1 = load i16, ptr %in1, align 2
   %1 = call i16 asm "foo $1,$0", "=r|r,r|r>"(i16 %tmp1) nounwind
-  store i16 %1, i16* %out0, align 2
+  store i16 %1, ptr %out0, align 2
   ret void
 }
 
@@ -219,36 +219,36 @@ define void @multi_r() nounwind {
 entry:
   %out0 = alloca i16, align 2
   %in1 = alloca i16, align 2
-  store i16 0, i16* %out0, align 2
-  store i16 1, i16* %in1, align 2
-  %tmp = load i16, i16* %in1, align 2
+  store i16 0, ptr %out0, align 2
+  store i16 1, ptr %in1, align 2
+  %tmp = load i16, ptr %in1, align 2
   %0 = call i16 asm "foo $1,$0", "=r|r,r|m"(i16 %tmp) nounwind
-  store i16 %0, i16* %out0, align 2
+  store i16 %0, ptr %out0, align 2
   ret void
 }
 
 define void @multi_i() nounwind {
 entry:
   %out0 = alloca i16, align 2
-  store i16 0, i16* %out0, align 2
+  store i16 0, ptr %out0, align 2
   %0 = call i16 asm "foo $1,$0", "=r|r,r|i"(i16 1) nounwind
-  store i16 %0, i16* %out0, align 2
+  store i16 %0, ptr %out0, align 2
   ret void
 }
 
 define void @multi_n() nounwind {
 entry:
   %out0 = alloca i16, align 2
-  store i16 0, i16* %out0, align 2
+  store i16 0, ptr %out0, align 2
   %0 = call i16 asm "foo $1,$0", "=r|r,r|n"(i16 1) nounwind
-  store i16 %0, i16* %out0, align 2
+  store i16 %0, ptr %out0, align 2
   ret void
 }
 
 define void @multi_E() nounwind {
 entry:
   %out0 = alloca double, align 8
-  store double 0.000000e+000, double* %out0, align 8
+  store double 0.000000e+000, ptr %out0, align 8
 ; No lowering support.
 ;  %0 = call double asm "foo $1,$0", "=r|r,r|E"(double 1.000000e+001) nounwind
 ;  store double %0, double* %out0, align 8
@@ -258,7 +258,7 @@ entry:
 define void @multi_F() nounwind {
 entry:
   %out0 = alloca double, align 8
-  store double 0.000000e+000, double* %out0, align 8
+  store double 0.000000e+000, ptr %out0, align 8
 ; No lowering support.
 ;  %0 = call double asm "foo $1,$0", "=r|r,r|F"(double 1.000000e+000) nounwind
 ;  store double %0, double* %out0, align 8
@@ -268,7 +268,7 @@ entry:
 define void @multi_s() nounwind {
 entry:
   %out0 = alloca i16, align 2
-  store i16 0, i16* %out0, align 2
+  store i16 0, ptr %out0, align 2
   ret void
 }
 
@@ -276,16 +276,16 @@ define void @multi_g() nounwind {
 entry:
   %out0 = alloca i16, align 2
   %in1 = alloca i16, align 2
-  store i16 0, i16* %out0, align 2
-  store i16 1, i16* %in1, align 2
-  %tmp = load i16, i16* %in1, align 2
+  store i16 0, ptr %out0, align 2
+  store i16 1, ptr %in1, align 2
+  %tmp = load i16, ptr %in1, align 2
   %0 = call i16 asm "foo $1,$0", "=r|r,r|imr"(i16 %tmp) nounwind
-  store i16 %0, i16* %out0, align 2
-  %tmp1 = load i16, i16* @min1, align 2
+  store i16 %0, ptr %out0, align 2
+  %tmp1 = load i16, ptr @min1, align 2
   %1 = call i16 asm "foo $1,$0", "=r|r,r|imr"(i16 %tmp1) nounwind
-  store i16 %1, i16* %out0, align 2
+  store i16 %1, ptr %out0, align 2
   %2 = call i16 asm "foo $1,$0", "=r|r,r|imr"(i16 1) nounwind
-  store i16 %2, i16* %out0, align 2
+  store i16 %2, ptr %out0, align 2
   ret void
 }
 
@@ -293,18 +293,18 @@ define void @multi_X() nounwind {
 entry:
   %out0 = alloca i16, align 2
   %in1 = alloca i16, align 2
-  store i16 0, i16* %out0, align 2
-  store i16 1, i16* %in1, align 2
-  %tmp = load i16, i16* %in1, align 2
+  store i16 0, ptr %out0, align 2
+  store i16 1, ptr %in1, align 2
+  %tmp = load i16, ptr %in1, align 2
   %0 = call i16 asm "foo $1,$0", "=r|r,r|X"(i16 %tmp) nounwind
-  store i16 %0, i16* %out0, align 2
-  %tmp1 = load i16, i16* @min1, align 2
+  store i16 %0, ptr %out0, align 2
+  %tmp1 = load i16, ptr @min1, align 2
   %1 = call i16 asm "foo $1,$0", "=r|r,r|X"(i16 %tmp1) nounwind
-  store i16 %1, i16* %out0, align 2
+  store i16 %1, ptr %out0, align 2
   %2 = call i16 asm "foo $1,$0", "=r|r,r|X"(i16 1) nounwind
-  store i16 %2, i16* %out0, align 2
-  %3 = call i16 asm "foo $1,$0", "=r|r,r|X"(i16* getelementptr inbounds ([2 x i16], [2 x i16]* @marray, i32 0, i32 0)) nounwind
-  store i16 %3, i16* %out0, align 2
+  store i16 %2, ptr %out0, align 2
+  %3 = call i16 asm "foo $1,$0", "=r|r,r|X"(ptr @marray) nounwind
+  store i16 %3, ptr %out0, align 2
 ; No lowering support.
 ;  %4 = call i16 asm "foo $1,$0", "=r|r,r|X"(double 1.000000e+001) nounwind
 ;  store i16 %4, i16* %out0, align 2
@@ -316,8 +316,8 @@ entry:
 define void @multi_p() nounwind {
 entry:
   %out0 = alloca i16, align 2
-  store i16 0, i16* %out0, align 2
-  %0 = call i16 asm "foo $1,$0", "=r|r,r|r"(i16* getelementptr inbounds ([2 x i16], [2 x i16]* @marray, i32 0, i32 0)) nounwind
-  store i16 %0, i16* %out0, align 2
+  store i16 0, ptr %out0, align 2
+  %0 = call i16 asm "foo $1,$0", "=r|r,r|r"(ptr @marray) nounwind
+  store i16 %0, ptr %out0, align 2
   ret void
 }

diff  --git a/llvm/test/CodeGen/MSP430/postinc.ll b/llvm/test/CodeGen/MSP430/postinc.ll
index 20ee8fb3c8562..3f1320d713433 100644
--- a/llvm/test/CodeGen/MSP430/postinc.ll
+++ b/llvm/test/CodeGen/MSP430/postinc.ll
@@ -2,7 +2,7 @@
 target datalayout = "e-p:16:8:8-i8:8:8-i16:8:8-i32:8:8"
 target triple = "msp430"
 
-define zeroext i16 @add(i16* nocapture %a, i16 zeroext %n) nounwind readonly {
+define zeroext i16 @add(ptr nocapture %a, i16 zeroext %n) nounwind readonly {
 entry:
   %cmp8 = icmp eq i16 %n, 0                       ; <i1> [#uses=1]
   br i1 %cmp8, label %for.end, label %for.body
@@ -10,10 +10,10 @@ entry:
 for.body:                                         ; preds = %for.body, %entry
   %i.010 = phi i16 [ 0, %entry ], [ %inc, %for.body ] ; <i16> [#uses=2]
   %sum.09 = phi i16 [ 0, %entry ], [ %add, %for.body ] ; <i16> [#uses=1]
-  %arrayidx = getelementptr i16, i16* %a, i16 %i.010   ; <i16*> [#uses=1]
+  %arrayidx = getelementptr i16, ptr %a, i16 %i.010   ; <i16*> [#uses=1]
 ; CHECK-LABEL: add:
 ; CHECK: add @r{{[0-9]+}}+, r{{[0-9]+}}
-  %tmp4 = load i16, i16* %arrayidx                     ; <i16> [#uses=1]
+  %tmp4 = load i16, ptr %arrayidx                     ; <i16> [#uses=1]
   %add = add i16 %tmp4, %sum.09                   ; <i16> [#uses=2]
   %inc = add i16 %i.010, 1                        ; <i16> [#uses=2]
   %exitcond = icmp eq i16 %inc, %n                ; <i1> [#uses=1]
@@ -24,7 +24,7 @@ for.end:                                          ; preds = %for.body, %entry
   ret i16 %sum.0.lcssa
 }
 
-define zeroext i16 @sub(i16* nocapture %a, i16 zeroext %n) nounwind readonly {
+define zeroext i16 @sub(ptr nocapture %a, i16 zeroext %n) nounwind readonly {
 entry:
   %cmp8 = icmp eq i16 %n, 0                       ; <i1> [#uses=1]
   br i1 %cmp8, label %for.end, label %for.body
@@ -32,10 +32,10 @@ entry:
 for.body:                                         ; preds = %for.body, %entry
   %i.010 = phi i16 [ 0, %entry ], [ %inc, %for.body ] ; <i16> [#uses=2]
   %sum.09 = phi i16 [ 0, %entry ], [ %add, %for.body ] ; <i16> [#uses=1]
-  %arrayidx = getelementptr i16, i16* %a, i16 %i.010   ; <i16*> [#uses=1]
+  %arrayidx = getelementptr i16, ptr %a, i16 %i.010   ; <i16*> [#uses=1]
 ; CHECK-LABEL: sub:
 ; CHECK: sub @r{{[0-9]+}}+, r{{[0-9]+}}
-  %tmp4 = load i16, i16* %arrayidx                     ; <i16> [#uses=1]
+  %tmp4 = load i16, ptr %arrayidx                     ; <i16> [#uses=1]
   %add = sub i16 %tmp4, %sum.09                   ; <i16> [#uses=2]
   %inc = add i16 %i.010, 1                        ; <i16> [#uses=2]
   %exitcond = icmp eq i16 %inc, %n                ; <i1> [#uses=1]
@@ -46,7 +46,7 @@ for.end:                                          ; preds = %for.body, %entry
   ret i16 %sum.0.lcssa
 }
 
-define zeroext i16 @or(i16* nocapture %a, i16 zeroext %n) nounwind readonly {
+define zeroext i16 @or(ptr nocapture %a, i16 zeroext %n) nounwind readonly {
 entry:
   %cmp8 = icmp eq i16 %n, 0                       ; <i1> [#uses=1]
   br i1 %cmp8, label %for.end, label %for.body
@@ -54,10 +54,10 @@ entry:
 for.body:                                         ; preds = %for.body, %entry
   %i.010 = phi i16 [ 0, %entry ], [ %inc, %for.body ] ; <i16> [#uses=2]
   %sum.09 = phi i16 [ 0, %entry ], [ %add, %for.body ] ; <i16> [#uses=1]
-  %arrayidx = getelementptr i16, i16* %a, i16 %i.010   ; <i16*> [#uses=1]
+  %arrayidx = getelementptr i16, ptr %a, i16 %i.010   ; <i16*> [#uses=1]
 ; CHECK-LABEL: or:
 ; CHECK: bis @r{{[0-9]+}}+, r{{[0-9]+}}
-  %tmp4 = load i16, i16* %arrayidx                     ; <i16> [#uses=1]
+  %tmp4 = load i16, ptr %arrayidx                     ; <i16> [#uses=1]
   %add = or i16 %tmp4, %sum.09                   ; <i16> [#uses=2]
   %inc = add i16 %i.010, 1                        ; <i16> [#uses=2]
   %exitcond = icmp eq i16 %inc, %n                ; <i1> [#uses=1]
@@ -68,7 +68,7 @@ for.end:                                          ; preds = %for.body, %entry
   ret i16 %sum.0.lcssa
 }
 
-define zeroext i16 @xor(i16* nocapture %a, i16 zeroext %n) nounwind readonly {
+define zeroext i16 @xor(ptr nocapture %a, i16 zeroext %n) nounwind readonly {
 entry:
   %cmp8 = icmp eq i16 %n, 0                       ; <i1> [#uses=1]
   br i1 %cmp8, label %for.end, label %for.body
@@ -76,10 +76,10 @@ entry:
 for.body:                                         ; preds = %for.body, %entry
   %i.010 = phi i16 [ 0, %entry ], [ %inc, %for.body ] ; <i16> [#uses=2]
   %sum.09 = phi i16 [ 0, %entry ], [ %add, %for.body ] ; <i16> [#uses=1]
-  %arrayidx = getelementptr i16, i16* %a, i16 %i.010   ; <i16*> [#uses=1]
+  %arrayidx = getelementptr i16, ptr %a, i16 %i.010   ; <i16*> [#uses=1]
 ; CHECK-LABEL: xor:
 ; CHECK: xor @r{{[0-9]+}}+, r{{[0-9]+}}
-  %tmp4 = load i16, i16* %arrayidx                     ; <i16> [#uses=1]
+  %tmp4 = load i16, ptr %arrayidx                     ; <i16> [#uses=1]
   %add = xor i16 %tmp4, %sum.09                   ; <i16> [#uses=2]
   %inc = add i16 %i.010, 1                        ; <i16> [#uses=2]
   %exitcond = icmp eq i16 %inc, %n                ; <i1> [#uses=1]
@@ -90,7 +90,7 @@ for.end:                                          ; preds = %for.body, %entry
   ret i16 %sum.0.lcssa
 }
 
-define zeroext i16 @and(i16* nocapture %a, i16 zeroext %n) nounwind readonly {
+define zeroext i16 @and(ptr nocapture %a, i16 zeroext %n) nounwind readonly {
 entry:
   %cmp8 = icmp eq i16 %n, 0                       ; <i1> [#uses=1]
   br i1 %cmp8, label %for.end, label %for.body
@@ -98,10 +98,10 @@ entry:
 for.body:                                         ; preds = %for.body, %entry
   %i.010 = phi i16 [ 0, %entry ], [ %inc, %for.body ] ; <i16> [#uses=2]
   %sum.09 = phi i16 [ 0, %entry ], [ %add, %for.body ] ; <i16> [#uses=1]
-  %arrayidx = getelementptr i16, i16* %a, i16 %i.010   ; <i16*> [#uses=1]
+  %arrayidx = getelementptr i16, ptr %a, i16 %i.010   ; <i16*> [#uses=1]
 ; CHECK-LABEL: and:
 ; CHECK: and @r{{[0-9]+}}+, r{{[0-9]+}}
-  %tmp4 = load i16, i16* %arrayidx                     ; <i16> [#uses=1]
+  %tmp4 = load i16, ptr %arrayidx                     ; <i16> [#uses=1]
   %add = and i16 %tmp4, %sum.09                   ; <i16> [#uses=2]
   %inc = add i16 %i.010, 1                        ; <i16> [#uses=2]
   %exitcond = icmp eq i16 %inc, %n                ; <i1> [#uses=1]

diff  --git a/llvm/test/CodeGen/MSP430/promote-i8-mul.ll b/llvm/test/CodeGen/MSP430/promote-i8-mul.ll
index 0e05e3978b1ee..2971502ceb768 100644
--- a/llvm/test/CodeGen/MSP430/promote-i8-mul.ll
+++ b/llvm/test/CodeGen/MSP430/promote-i8-mul.ll
@@ -13,10 +13,10 @@ entry:
   ret i8 %mul
 }
 
-define void @uint81(i16* nocapture %p_32) nounwind {
+define void @uint81(ptr nocapture %p_32) nounwind {
 entry:
-  %call = tail call i16 @bar(i8* bitcast (i8 (i8, i8)* @foo to i8*)) nounwind ; <i16> [#uses=0]
+  %call = tail call i16 @bar(ptr @foo) nounwind ; <i16> [#uses=0]
   ret void
 }
 
-declare i16 @bar(i8*)
+declare i16 @bar(ptr)

diff  --git a/llvm/test/CodeGen/MSP430/spill-to-stack.ll b/llvm/test/CodeGen/MSP430/spill-to-stack.ll
index 549e30c43a2c7..69f66938dd780 100644
--- a/llvm/test/CodeGen/MSP430/spill-to-stack.ll
+++ b/llvm/test/CodeGen/MSP430/spill-to-stack.ll
@@ -2,39 +2,39 @@
 %VeryLarge = type { i8, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
 
 ; intentionally cause a spill
-define void @inc(%VeryLarge* byval(%VeryLarge) align 1 %s) {
+define void @inc(ptr byval(%VeryLarge) align 1 %s) {
 entry:
-  %p0 = getelementptr inbounds %VeryLarge, %VeryLarge* %s, i32 0, i32 0
-  %0 = load i8, i8* %p0
-  %p1 = getelementptr inbounds %VeryLarge, %VeryLarge* %s, i32 0, i32 1
-  %1 = load i32, i32* %p1
-  %p2 = getelementptr inbounds %VeryLarge, %VeryLarge* %s, i32 0, i32 2
-  %2 = load i32, i32* %p2
-  %p3 = getelementptr inbounds %VeryLarge, %VeryLarge* %s, i32 0, i32 3
-  %3 = load i32, i32* %p3
-  %p4 = getelementptr inbounds %VeryLarge, %VeryLarge* %s, i32 0, i32 4
-  %4 = load i32, i32* %p4
-  %p5 = getelementptr inbounds %VeryLarge, %VeryLarge* %s, i32 0, i32 5
-  %5 = load i32, i32* %p5
-  %p6 = getelementptr inbounds %VeryLarge, %VeryLarge* %s, i32 0, i32 6
-  %6 = load i32, i32* %p6
-  %p7 = getelementptr inbounds %VeryLarge, %VeryLarge* %s, i32 0, i32 7
-  %7 = load i32, i32* %p7
+  %p0 = getelementptr inbounds %VeryLarge, ptr %s, i32 0, i32 0
+  %0 = load i8, ptr %p0
+  %p1 = getelementptr inbounds %VeryLarge, ptr %s, i32 0, i32 1
+  %1 = load i32, ptr %p1
+  %p2 = getelementptr inbounds %VeryLarge, ptr %s, i32 0, i32 2
+  %2 = load i32, ptr %p2
+  %p3 = getelementptr inbounds %VeryLarge, ptr %s, i32 0, i32 3
+  %3 = load i32, ptr %p3
+  %p4 = getelementptr inbounds %VeryLarge, ptr %s, i32 0, i32 4
+  %4 = load i32, ptr %p4
+  %p5 = getelementptr inbounds %VeryLarge, ptr %s, i32 0, i32 5
+  %5 = load i32, ptr %p5
+  %p6 = getelementptr inbounds %VeryLarge, ptr %s, i32 0, i32 6
+  %6 = load i32, ptr %p6
+  %p7 = getelementptr inbounds %VeryLarge, ptr %s, i32 0, i32 7
+  %7 = load i32, ptr %p7
   %add = add i8 %0, 1
-  store i8 %add, i8* %p0
+  store i8 %add, ptr %p0
   %add2 = add i32 %1, 2
-  store i32 %add2, i32* %p1
+  store i32 %add2, ptr %p1
   %add3 = add i32 %2, 3
-  store i32 %add3, i32* %p2
+  store i32 %add3, ptr %p2
   %add4 = add i32 %3, 4
-  store i32 %add4, i32* %p3
+  store i32 %add4, ptr %p3
   %add5 = add i32 %4, 5
-  store i32 %add5, i32* %p4
+  store i32 %add5, ptr %p4
   %add6 = add i32 %5, 6
-  store i32 %add6, i32* %p5
+  store i32 %add6, ptr %p5
   %add7 = add i32 %6, 7
-  store i32 %add7, i32* %p6
+  store i32 %add7, ptr %p6
   %add8 = add i32 %7, 8
-  store i32 %add8, i32* %p7
+  store i32 %add8, ptr %p7
   ret void
 }

diff  --git a/llvm/test/CodeGen/MSP430/stacksave_restore.ll b/llvm/test/CodeGen/MSP430/stacksave_restore.ll
index 47c4553929d4b..64bfcc63aad7c 100644
--- a/llvm/test/CodeGen/MSP430/stacksave_restore.ll
+++ b/llvm/test/CodeGen/MSP430/stacksave_restore.ll
@@ -4,10 +4,10 @@ target triple = "msp430"
 
 define void @foo() {
 entry:
-  %0 = tail call i8* @llvm.stacksave()
-  tail call void @llvm.stackrestore(i8* %0)
+  %0 = tail call ptr @llvm.stacksave()
+  tail call void @llvm.stackrestore(ptr %0)
   ret void
 }
 
-declare i8* @llvm.stacksave()
-declare void @llvm.stackrestore(i8*)
+declare ptr @llvm.stacksave()
+declare void @llvm.stackrestore(ptr)

diff  --git a/llvm/test/CodeGen/MSP430/struct-return.ll b/llvm/test/CodeGen/MSP430/struct-return.ll
index d0e5a7f8bb255..faee5afe831f0 100644
--- a/llvm/test/CodeGen/MSP430/struct-return.ll
+++ b/llvm/test/CodeGen/MSP430/struct-return.ll
@@ -31,24 +31,24 @@ define void @test() #1 {
   %1 = alloca %struct.S, align 2
 ; CHECK:      mov	r1, r12
 ; CHECK-NEXT: call	#sret
-  call void @sret(%struct.S* nonnull sret(%struct.S) %1) #3
+  call void @sret(ptr nonnull sret(%struct.S) %1) #3
   ret void
 }
 
-define void @sret(%struct.S* noalias nocapture sret(%struct.S)) #0 {
+define void @sret(ptr noalias nocapture sret(%struct.S)) #0 {
 ; CHECK-LABEL: sret:
 ; CHECK: mov	&a, 0(r12)
 ; CHECK: mov	&b, 2(r12)
 ; CHECK: mov	&c, 4(r12)
-  %2 = getelementptr inbounds %struct.S, %struct.S* %0, i16 0, i32 0
-  %3 = load i16, i16* @a, align 2
-  store i16 %3, i16* %2, align 2
-  %4 = getelementptr inbounds %struct.S, %struct.S* %0, i16 0, i32 1
-  %5 = load i16, i16* @b, align 2
-  store i16 %5, i16* %4, align 2
-  %6 = getelementptr inbounds %struct.S, %struct.S* %0, i16 0, i32 2
-  %7 = load i16, i16* @c, align 2
-  store i16 %7, i16* %6, align 2
+  %2 = getelementptr inbounds %struct.S, ptr %0, i16 0, i32 0
+  %3 = load i16, ptr @a, align 2
+  store i16 %3, ptr %2, align 2
+  %4 = getelementptr inbounds %struct.S, ptr %0, i16 0, i32 1
+  %5 = load i16, ptr @b, align 2
+  store i16 %5, ptr %4, align 2
+  %6 = getelementptr inbounds %struct.S, ptr %0, i16 0, i32 2
+  %7 = load i16, ptr @c, align 2
+  store i16 %7, ptr %6, align 2
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/MSP430/struct_layout.ll b/llvm/test/CodeGen/MSP430/struct_layout.ll
index 4c5a131acca6a..e228514761905 100644
--- a/llvm/test/CodeGen/MSP430/struct_layout.ll
+++ b/llvm/test/CodeGen/MSP430/struct_layout.ll
@@ -11,12 +11,12 @@ define void @foo() {
   %1 = alloca %struct.X
   %2 = alloca %struct.X
   %3 = alloca %struct.X
-  %4 = getelementptr inbounds %struct.X, %struct.X* %1, i32 0, i32 0
-  store i8 1, i8* %4
-  %5 = getelementptr inbounds %struct.X, %struct.X* %2, i32 0, i32 0
-  store i8 1, i8* %5
-  %6 = getelementptr inbounds %struct.X, %struct.X* %3, i32 0, i32 0
-  store i8 1, i8* %6
+  %4 = getelementptr inbounds %struct.X, ptr %1, i32 0, i32 0
+  store i8 1, ptr %4
+  %5 = getelementptr inbounds %struct.X, ptr %2, i32 0, i32 0
+  store i8 1, ptr %5
+  %6 = getelementptr inbounds %struct.X, ptr %3, i32 0, i32 0
+  store i8 1, ptr %6
   ret void
 }
 
@@ -25,15 +25,15 @@ define void @foo() {
 ; CHECK: mov.b   #1, 3(r1)
 define void @bar() {
   %1 = alloca [3 x %struct.X]
-  %2 = getelementptr inbounds [3 x %struct.X], [3 x %struct.X]* %1, i16 0, i16 0
-  %3 = getelementptr inbounds %struct.X, %struct.X* %2, i32 0, i32 0
-  store i8 1, i8* %3
-  %4 = getelementptr inbounds [3 x %struct.X], [3 x %struct.X]* %1, i16 0, i16 1
-  %5 = getelementptr inbounds %struct.X, %struct.X* %4, i32 0, i32 0
-  store i8 1, i8* %5
-  %6 = getelementptr inbounds [3 x %struct.X], [3 x %struct.X]* %1, i16 0, i16 2
-  %7 = getelementptr inbounds %struct.X, %struct.X* %6, i32 0, i32 0
-  store i8 1, i8* %7
+  %2 = getelementptr inbounds [3 x %struct.X], ptr %1, i16 0, i16 0
+  %3 = getelementptr inbounds %struct.X, ptr %2, i32 0, i32 0
+  store i8 1, ptr %3
+  %4 = getelementptr inbounds [3 x %struct.X], ptr %1, i16 0, i16 1
+  %5 = getelementptr inbounds %struct.X, ptr %4, i32 0, i32 0
+  store i8 1, ptr %5
+  %6 = getelementptr inbounds [3 x %struct.X], ptr %1, i16 0, i16 2
+  %7 = getelementptr inbounds %struct.X, ptr %6, i32 0, i32 0
+  store i8 1, ptr %7
   ret void
 }
 
@@ -45,13 +45,13 @@ define void @bar() {
 define void @baz() {
   %1 = alloca %struct.Y, align 2
   %2 = alloca %struct.Y, align 2
-  %3 = getelementptr inbounds %struct.Y, %struct.Y* %1, i32 0, i32 0
-  store i8 1, i8* %3, align 2
-  %4 = getelementptr inbounds %struct.Y, %struct.Y* %1, i32 0, i32 1
-  store i16 2, i16* %4, align 2
-  %5 = getelementptr inbounds %struct.Y, %struct.Y* %2, i32 0, i32 0
-  store i8 1, i8* %5, align 2
-  %6 = getelementptr inbounds %struct.Y, %struct.Y* %2, i32 0, i32 1
-  store i16 2, i16* %6, align 2
+  %3 = getelementptr inbounds %struct.Y, ptr %1, i32 0, i32 0
+  store i8 1, ptr %3, align 2
+  %4 = getelementptr inbounds %struct.Y, ptr %1, i32 0, i32 1
+  store i16 2, ptr %4, align 2
+  %5 = getelementptr inbounds %struct.Y, ptr %2, i32 0, i32 0
+  store i8 1, ptr %5, align 2
+  %6 = getelementptr inbounds %struct.Y, ptr %2, i32 0, i32 1
+  store i16 2, ptr %6, align 2
   ret void
 }

diff  --git a/llvm/test/CodeGen/MSP430/transient-stack-alignment.ll b/llvm/test/CodeGen/MSP430/transient-stack-alignment.ll
index 8b26ea9b517c7..9f6d94962ecd8 100644
--- a/llvm/test/CodeGen/MSP430/transient-stack-alignment.ll
+++ b/llvm/test/CodeGen/MSP430/transient-stack-alignment.ll
@@ -8,7 +8,7 @@ define void @test() #0 {
 ; CHECK: sub #2, r1
   %1 = alloca i8, align 1
 ; CHECK-NEXT: clr.b 1(r1)
-  store i8 0, i8* %1, align 1
+  store i8 0, ptr %1, align 1
 ; CHECK-NEXT: add #2, r1
 ; CHECK-NEXT: ret
   ret void

diff  --git a/llvm/test/CodeGen/MSP430/vararg.ll b/llvm/test/CodeGen/MSP430/vararg.ll
index edb61d2221ef9..ceba7b123d820 100644
--- a/llvm/test/CodeGen/MSP430/vararg.ll
+++ b/llvm/test/CodeGen/MSP430/vararg.ll
@@ -3,47 +3,47 @@
 target datalayout = "e-p:16:16:16-i8:8:8-i16:16:16-i32:16:32-n8:16"
 target triple = "msp430---elf"
 
-declare void @llvm.va_start(i8*) nounwind
-declare void @llvm.va_end(i8*) nounwind
-declare void @llvm.va_copy(i8*, i8*) nounwind
+declare void @llvm.va_start(ptr) nounwind
+declare void @llvm.va_end(ptr) nounwind
+declare void @llvm.va_copy(ptr, ptr) nounwind
 
 define void @va_start(i16 %a, ...) nounwind {
 entry:
 ; CHECK-LABEL: va_start:
 ; CHECK: sub #2, r1
-  %vl = alloca i8*, align 2
-  %vl1 = bitcast i8** %vl to i8*
+  %vl = alloca ptr, align 2
+  %vl1 = bitcast ptr %vl to ptr
 ; CHECK-NEXT: mov r1, [[REG:r[0-9]+]]
 ; CHECK-NEXT: add #6, [[REG]]
 ; CHECK-NEXT: mov [[REG]], 0(r1)
-  call void @llvm.va_start(i8* %vl1)
-  call void @llvm.va_end(i8* %vl1)
+  call void @llvm.va_start(ptr %vl1)
+  call void @llvm.va_end(ptr %vl1)
   ret void
 }
 
-define i16 @va_arg(i8* %vl) nounwind {
+define i16 @va_arg(ptr %vl) nounwind {
 entry:
 ; CHECK-LABEL: va_arg:
-  %vl.addr = alloca i8*, align 2
-  store i8* %vl, i8** %vl.addr, align 2
+  %vl.addr = alloca ptr, align 2
+  store ptr %vl, ptr %vl.addr, align 2
 ; CHECK: mov r12, [[REG:r[0-9]+]]
 ; CHECK-NEXT: incd [[REG]]
 ; CHECK-NEXT: mov [[REG]], 0(r1)
-  %0 = va_arg i8** %vl.addr, i16
+  %0 = va_arg ptr %vl.addr, i16
 ; CHECK-NEXT: mov 0(r12), r12
   ret i16 %0
 }
 
-define void @va_copy(i8* %vl) nounwind {
+define void @va_copy(ptr %vl) nounwind {
 entry:
 ; CHECK-LABEL: va_copy:
-  %vl.addr = alloca i8*, align 2
-  %vl2 = alloca i8*, align 2
+  %vl.addr = alloca ptr, align 2
+  %vl2 = alloca ptr, align 2
 ; CHECK-DAG: mov r12, 2(r1)
-  store i8* %vl, i8** %vl.addr, align 2
-  %0 = bitcast i8** %vl2 to i8*
-  %1 = bitcast i8** %vl.addr to i8*
+  store ptr %vl, ptr %vl.addr, align 2
+  %0 = bitcast ptr %vl2 to ptr
+  %1 = bitcast ptr %vl.addr to ptr
 ; CHECK-DAG: mov r12, 0(r1)
-  call void @llvm.va_copy(i8* %0, i8* %1)
+  call void @llvm.va_copy(ptr %0, ptr %1)
   ret void
 }

diff  --git a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/var_arg.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/var_arg.mir
index bd3f03d87e75a..fd3d9ae8554fe 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/var_arg.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/var_arg.mir
@@ -3,30 +3,30 @@
 --- |
 
   @.str = private unnamed_addr constant [11 x i8] c"string %s\0A\00", align 1
-  declare void @llvm.va_start(i8*) #0
-  declare void @llvm.va_copy(i8*, i8*) #0
-  declare i32 @printf(i8*, ...)
+  declare void @llvm.va_start(ptr) #0
+  declare void @llvm.va_copy(ptr, ptr) #0
+  declare i32 @printf(ptr, ...)
 
-  define void @testVaCopyArg(i8* %fmt, ...) {
+  define void @testVaCopyArg(ptr %fmt, ...) {
   entry:
-    %fmt.addr = alloca i8*, align 4
-    %ap = alloca i8*, align 4
-    %aq = alloca i8*, align 4
-    %s = alloca i8*, align 4
-    store i8* %fmt, i8** %fmt.addr, align 4
-    %ap1 = bitcast i8** %ap to i8*
-    call void @llvm.va_start(i8* %ap1)
-    %0 = bitcast i8** %aq to i8*
-    %1 = bitcast i8** %ap to i8*
-    call void @llvm.va_copy(i8* %0, i8* %1)
-    %argp.cur = load i8*, i8** %aq, align 4
-    %argp.next = getelementptr inbounds i8, i8* %argp.cur, i32 4
-    store i8* %argp.next, i8** %aq, align 4
-    %2 = bitcast i8* %argp.cur to i8**
-    %3 = load i8*, i8** %2, align 4
-    store i8* %3, i8** %s, align 4
-    %4 = load i8*, i8** %s, align 4
-    %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @.str, i32 0, i32 0), i8* %4)
+    %fmt.addr = alloca ptr, align 4
+    %ap = alloca ptr, align 4
+    %aq = alloca ptr, align 4
+    %s = alloca ptr, align 4
+    store ptr %fmt, ptr %fmt.addr, align 4
+    %ap1 = bitcast ptr %ap to ptr
+    call void @llvm.va_start(ptr %ap1)
+    %0 = bitcast ptr %aq to ptr
+    %1 = bitcast ptr %ap to ptr
+    call void @llvm.va_copy(ptr %0, ptr %1)
+    %argp.cur = load ptr, ptr %aq, align 4
+    %argp.next = getelementptr inbounds i8, ptr %argp.cur, i32 4
+    store ptr %argp.next, ptr %aq, align 4
+    %2 = bitcast ptr %argp.cur to ptr
+    %3 = load ptr, ptr %2, align 4
+    store ptr %3, ptr %s, align 4
+    %4 = load ptr, ptr %s, align 4
+    %call = call i32 (ptr, ...) @printf(ptr @.str, ptr %4)
     ret void
   }
 

diff  --git a/llvm/test/CodeGen/Mips/GlobalISel/irtranslator/sret_pointer.ll b/llvm/test/CodeGen/Mips/GlobalISel/irtranslator/sret_pointer.ll
index 5a0b0b7e321f5..f3f762c742eb4 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/irtranslator/sret_pointer.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/irtranslator/sret_pointer.ll
@@ -3,7 +3,7 @@
 
 %struct.S = type { i32, i32 }
 
-define void @ZeroInit(%struct.S* noalias sret(%struct.S) %agg.result) {
+define void @ZeroInit(ptr noalias sret(%struct.S) %agg.result) {
   ; MIPS32-LABEL: name: ZeroInit
   ; MIPS32: bb.1.entry:
   ; MIPS32-NEXT:   liveins: $a0
@@ -17,14 +17,14 @@ define void @ZeroInit(%struct.S* noalias sret(%struct.S) %agg.result) {
   ; MIPS32-NEXT:   G_STORE [[C]](s32), [[PTR_ADD]](p0) :: (store (s32) into %ir.y)
   ; MIPS32-NEXT:   RetRA
 entry:
-  %x = getelementptr inbounds %struct.S, %struct.S* %agg.result, i32 0, i32 0
-  store i32 0, i32* %x, align 4
-  %y = getelementptr inbounds %struct.S, %struct.S* %agg.result, i32 0, i32 1
-  store i32 0, i32* %y, align 4
+  %x = getelementptr inbounds %struct.S, ptr %agg.result, i32 0, i32 0
+  store i32 0, ptr %x, align 4
+  %y = getelementptr inbounds %struct.S, ptr %agg.result, i32 0, i32 1
+  store i32 0, ptr %y, align 4
   ret void
 }
 
-define void @CallZeroInit(%struct.S* noalias sret(%struct.S) %agg.result) {
+define void @CallZeroInit(ptr noalias sret(%struct.S) %agg.result) {
   ; MIPS32-LABEL: name: CallZeroInit
   ; MIPS32: bb.1.entry:
   ; MIPS32-NEXT:   liveins: $a0
@@ -36,6 +36,6 @@ define void @CallZeroInit(%struct.S* noalias sret(%struct.S) %agg.result) {
   ; MIPS32-NEXT:   ADJCALLSTACKUP 16, 0, implicit-def $sp, implicit $sp
   ; MIPS32-NEXT:   RetRA
 entry:
-  call void @ZeroInit(%struct.S* sret(%struct.S) %agg.result)
+  call void @ZeroInit(ptr sret(%struct.S) %agg.result)
   ret void
 }

diff  --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/store_split_because_of_memsize_or_align.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/store_split_because_of_memsize_or_align.mir
index ea242cca1cbbe..1038571cea7e6 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/store_split_because_of_memsize_or_align.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/store_split_because_of_memsize_or_align.mir
@@ -29,179 +29,179 @@
   @i64_align4 = common global i64 0, align 4
   @i64_align8 = common global i64 0, align 8
 
-  define void @store3align1(%struct.MemSize3_Align1* %S, i32 signext %a) {
+  define void @store3align1(ptr %S, i32 signext %a) {
   entry:
-    %0 = bitcast %struct.MemSize3_Align1* %S to i24*
+    %0 = bitcast ptr %S to ptr
     %1 = trunc i32 %a to i24
-    store i24 %1, i24* %0, align 1
+    store i24 %1, ptr %0, align 1
     ret void
   }
 
-  define void @store3align2(%struct.MemSize3_Align2* %S, i32 signext %a) {
+  define void @store3align2(ptr %S, i32 signext %a) {
   entry:
-    %0 = bitcast %struct.MemSize3_Align2* %S to i24*
+    %0 = bitcast ptr %S to ptr
     %1 = trunc i32 %a to i24
-    store i24 %1, i24* %0, align 2
+    store i24 %1, ptr %0, align 2
     ret void
   }
 
-  define void @store3align4(%struct.MemSize3_Align4* %S, i32 signext %a) {
+  define void @store3align4(ptr %S, i32 signext %a) {
   entry:
-    %0 = bitcast %struct.MemSize3_Align4* %S to i24*
+    %0 = bitcast ptr %S to ptr
     %1 = trunc i32 %a to i24
-    store i24 %1, i24* %0, align 4
+    store i24 %1, ptr %0, align 4
     ret void
   }
 
-  define void @store3align8(%struct.MemSize3_Align8* %S, i32 signext %a) {
+  define void @store3align8(ptr %S, i32 signext %a) {
   entry:
-    %0 = bitcast %struct.MemSize3_Align8* %S to i24*
+    %0 = bitcast ptr %S to ptr
     %1 = trunc i32 %a to i24
-    store i24 %1, i24* %0, align 8
+    store i24 %1, ptr %0, align 8
     ret void
   }
 
-  define void @store5align1(%struct.MemSize5_Align1* %S, i64 %a) {
+  define void @store5align1(ptr %S, i64 %a) {
   entry:
-    %0 = bitcast %struct.MemSize5_Align1* %S to i40*
+    %0 = bitcast ptr %S to ptr
     %1 = trunc i64 %a to i40
-    store i40 %1, i40* %0, align 1
+    store i40 %1, ptr %0, align 1
     ret void
   }
 
-  define void @store5align2(%struct.MemSize5_Align2* %S, i64 %a) {
+  define void @store5align2(ptr %S, i64 %a) {
   entry:
-    %0 = bitcast %struct.MemSize5_Align2* %S to i40*
+    %0 = bitcast ptr %S to ptr
     %1 = trunc i64 %a to i40
-    store i40 %1, i40* %0, align 2
+    store i40 %1, ptr %0, align 2
     ret void
   }
 
-  define void @store5align4(%struct.MemSize5_Align4* %S, i64 %a) {
+  define void @store5align4(ptr %S, i64 %a) {
   entry:
-    %0 = bitcast %struct.MemSize5_Align4* %S to i40*
+    %0 = bitcast ptr %S to ptr
     %1 = trunc i64 %a to i40
-    store i40 %1, i40* %0, align 4
+    store i40 %1, ptr %0, align 4
     ret void
   }
 
-  define void @store5align8(%struct.MemSize5_Align8* %S, i64 %a) {
+  define void @store5align8(ptr %S, i64 %a) {
   entry:
-    %0 = bitcast %struct.MemSize5_Align8* %S to i40*
+    %0 = bitcast ptr %S to ptr
     %1 = trunc i64 %a to i40
-    store i40 %1, i40* %0, align 8
+    store i40 %1, ptr %0, align 8
     ret void
   }
 
-  define void @store6align1(%struct.MemSize6_Align1* %S, i64 %a) {
+  define void @store6align1(ptr %S, i64 %a) {
   entry:
-    %0 = bitcast %struct.MemSize6_Align1* %S to i48*
+    %0 = bitcast ptr %S to ptr
     %1 = trunc i64 %a to i48
-    store i48 %1, i48* %0, align 1
+    store i48 %1, ptr %0, align 1
     ret void
   }
 
-  define void @store6align2(%struct.MemSize6_Align2* %S, i64 %a) {
+  define void @store6align2(ptr %S, i64 %a) {
   entry:
-    %0 = bitcast %struct.MemSize6_Align2* %S to i48*
+    %0 = bitcast ptr %S to ptr
     %1 = trunc i64 %a to i48
-    store i48 %1, i48* %0, align 2
+    store i48 %1, ptr %0, align 2
     ret void
   }
 
-  define void @store6align4(%struct.MemSize6_Align4* %S, i64 %a) {
+  define void @store6align4(ptr %S, i64 %a) {
   entry:
-    %0 = bitcast %struct.MemSize6_Align4* %S to i48*
+    %0 = bitcast ptr %S to ptr
     %1 = trunc i64 %a to i48
-    store i48 %1, i48* %0, align 4
+    store i48 %1, ptr %0, align 4
     ret void
   }
 
-  define void @store6align8(%struct.MemSize6_Align8* %S, i64 %a) {
+  define void @store6align8(ptr %S, i64 %a) {
   entry:
-    %0 = bitcast %struct.MemSize6_Align8* %S to i48*
+    %0 = bitcast ptr %S to ptr
     %1 = trunc i64 %a to i48
-    store i48 %1, i48* %0, align 8
+    store i48 %1, ptr %0, align 8
     ret void
   }
 
-  define void @store7align1(%struct.MemSize7_Align1* %S, i64 %a) {
+  define void @store7align1(ptr %S, i64 %a) {
   entry:
-    %0 = bitcast %struct.MemSize7_Align1* %S to i56*
+    %0 = bitcast ptr %S to ptr
     %1 = trunc i64 %a to i56
-    store i56 %1, i56* %0, align 1
+    store i56 %1, ptr %0, align 1
     ret void
   }
 
-  define void @store7align2(%struct.MemSize7_Align2* %S, i64 %a) {
+  define void @store7align2(ptr %S, i64 %a) {
   entry:
-    %0 = bitcast %struct.MemSize7_Align2* %S to i56*
+    %0 = bitcast ptr %S to ptr
     %1 = trunc i64 %a to i56
-    store i56 %1, i56* %0, align 2
+    store i56 %1, ptr %0, align 2
     ret void
   }
 
-  define void @store7align4(%struct.MemSize7_Align4* %S, i64 %a) {
+  define void @store7align4(ptr %S, i64 %a) {
   entry:
-    %0 = bitcast %struct.MemSize7_Align4* %S to i56*
+    %0 = bitcast ptr %S to ptr
     %1 = trunc i64 %a to i56
-    store i56 %1, i56* %0, align 4
+    store i56 %1, ptr %0, align 4
     ret void
   }
 
-  define void @store7align8(%struct.MemSize7_Align8* %S, i64 %a) {
+  define void @store7align8(ptr %S, i64 %a) {
   entry:
-    %0 = bitcast %struct.MemSize7_Align8* %S to i56*
+    %0 = bitcast ptr %S to ptr
     %1 = trunc i64 %a to i56
-    store i56 %1, i56* %0, align 8
+    store i56 %1, ptr %0, align 8
     ret void
   }
 
   define void @store_double_align1(double %a) {
   entry:
-    store double %a, double* @double_align1, align 1
+    store double %a, ptr @double_align1, align 1
     ret void
   }
 
   define void @store_double_align2(double %a) {
   entry:
-    store double %a, double* @double_align2, align 2
+    store double %a, ptr @double_align2, align 2
     ret void
   }
 
   define void @store_double_align4(double %a) {
   entry:
-    store double %a, double* @double_align4, align 4
+    store double %a, ptr @double_align4, align 4
     ret void
   }
 
   define void @store_double_align8(double %a) {
   entry:
-    store double %a, double* @double_align8, align 8
+    store double %a, ptr @double_align8, align 8
     ret void
   }
 
   define void @store_i64_align1(i64 %a) {
   entry:
-    store i64 %a, i64* @i64_align1, align 1
+    store i64 %a, ptr @i64_align1, align 1
     ret void
   }
 
   define void @store_i64_align2(i64 signext %a) {
   entry:
-    store i64 %a, i64* @i64_align2, align 2
+    store i64 %a, ptr @i64_align2, align 2
     ret void
   }
 
   define void @store_i64_align4(i64 %a) {
   entry:
-    store i64 %a, i64* @i64_align4, align 4
+    store i64 %a, ptr @i64_align4, align 4
     ret void
   }
 
   define void @store_i64_align8(i64 signext %a) {
   entry:
-    store i64 %a, i64* @i64_align8, align 8
+    store i64 %a, ptr @i64_align8, align 8
     ret void
   }
 

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/var_arg.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/var_arg.mir
index dfda755bff268..4b502cd3f6821 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/var_arg.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/var_arg.mir
@@ -3,30 +3,30 @@
 --- |
 
   @.str = private unnamed_addr constant [11 x i8] c"string %s\0A\00", align 1
-  declare void @llvm.va_start(i8*) #0
-  declare void @llvm.va_copy(i8*, i8*) #0
-  declare i32 @printf(i8*, ...)
+  declare void @llvm.va_start(ptr) #0
+  declare void @llvm.va_copy(ptr, ptr) #0
+  declare i32 @printf(ptr, ...)
 
-  define void @testVaCopyArg(i8* %fmt, ...) {
+  define void @testVaCopyArg(ptr %fmt, ...) {
   entry:
-    %fmt.addr = alloca i8*, align 4
-    %ap = alloca i8*, align 4
-    %aq = alloca i8*, align 4
-    %s = alloca i8*, align 4
-    store i8* %fmt, i8** %fmt.addr, align 4
-    %ap1 = bitcast i8** %ap to i8*
-    call void @llvm.va_start(i8* %ap1)
-    %0 = bitcast i8** %aq to i8*
-    %1 = bitcast i8** %ap to i8*
-    call void @llvm.va_copy(i8* %0, i8* %1)
-    %argp.cur = load i8*, i8** %aq, align 4
-    %argp.next = getelementptr inbounds i8, i8* %argp.cur, i32 4
-    store i8* %argp.next, i8** %aq, align 4
-    %2 = bitcast i8* %argp.cur to i8**
-    %3 = load i8*, i8** %2, align 4
-    store i8* %3, i8** %s, align 4
-    %4 = load i8*, i8** %s, align 4
-    %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @.str, i32 0, i32 0), i8* %4)
+    %fmt.addr = alloca ptr, align 4
+    %ap = alloca ptr, align 4
+    %aq = alloca ptr, align 4
+    %s = alloca ptr, align 4
+    store ptr %fmt, ptr %fmt.addr, align 4
+    %ap1 = bitcast ptr %ap to ptr
+    call void @llvm.va_start(ptr %ap1)
+    %0 = bitcast ptr %aq to ptr
+    %1 = bitcast ptr %ap to ptr
+    call void @llvm.va_copy(ptr %0, ptr %1)
+    %argp.cur = load ptr, ptr %aq, align 4
+    %argp.next = getelementptr inbounds i8, ptr %argp.cur, i32 4
+    store ptr %argp.next, ptr %aq, align 4
+    %2 = bitcast ptr %argp.cur to ptr
+    %3 = load ptr, ptr %2, align 4
+    store ptr %3, ptr %s, align 4
+    %4 = load ptr, ptr %s, align 4
+    %call = call i32 (ptr, ...) @printf(ptr @.str, ptr %4)
     ret void
   }
 

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/mips-prelegalizer-combiner/inline-memcpy.mir b/llvm/test/CodeGen/Mips/GlobalISel/mips-prelegalizer-combiner/inline-memcpy.mir
index d131d8c703292..3d6a2431a4960 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/mips-prelegalizer-combiner/inline-memcpy.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/mips-prelegalizer-combiner/inline-memcpy.mir
@@ -6,13 +6,13 @@
   target datalayout = "e-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64"
   target triple = "mipsel-pc-linux-gnu"
 
-  declare void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i64 immarg, i1 immarg) #0
+  declare void @llvm.memcpy.inline.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64 immarg, i1 immarg) #0
 
-  define void @test_memcpy_inline(i32* nocapture %dst, i32* nocapture readonly %src) local_unnamed_addr {
+  define void @test_memcpy_inline(ptr nocapture %dst, ptr nocapture readonly %src) local_unnamed_addr {
   entry:
-    %0 = bitcast i32* %dst to i8*
-    %1 = bitcast i32* %src to i8*
-    tail call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 2, i1 false)
+    %0 = bitcast ptr %dst to ptr
+    %1 = bitcast ptr %src to ptr
+    tail call void @llvm.memcpy.inline.p0.p0.i64(ptr align 4 %0, ptr align 4 %1, i64 2, i1 false)
     ret void
   }
 

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/var_arg.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/var_arg.mir
index ccd1b6c78c618..861f94bb0829c 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/var_arg.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/var_arg.mir
@@ -3,30 +3,30 @@
 --- |
 
   @.str = private unnamed_addr constant [11 x i8] c"string %s\0A\00", align 1
-  declare void @llvm.va_start(i8*) #0
-  declare void @llvm.va_copy(i8*, i8*) #0
-  declare i32 @printf(i8*, ...)
+  declare void @llvm.va_start(ptr) #0
+  declare void @llvm.va_copy(ptr, ptr) #0
+  declare i32 @printf(ptr, ...)
 
-  define void @testVaCopyArg(i8* %fmt, ...) {
+  define void @testVaCopyArg(ptr %fmt, ...) {
   entry:
-    %fmt.addr = alloca i8*, align 4
-    %ap = alloca i8*, align 4
-    %aq = alloca i8*, align 4
-    %s = alloca i8*, align 4
-    store i8* %fmt, i8** %fmt.addr, align 4
-    %ap1 = bitcast i8** %ap to i8*
-    call void @llvm.va_start(i8* %ap1)
-    %0 = bitcast i8** %aq to i8*
-    %1 = bitcast i8** %ap to i8*
-    call void @llvm.va_copy(i8* %0, i8* %1)
-    %argp.cur = load i8*, i8** %aq, align 4
-    %argp.next = getelementptr inbounds i8, i8* %argp.cur, i32 4
-    store i8* %argp.next, i8** %aq, align 4
-    %2 = bitcast i8* %argp.cur to i8**
-    %3 = load i8*, i8** %2, align 4
-    store i8* %3, i8** %s, align 4
-    %4 = load i8*, i8** %s, align 4
-    %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @.str, i32 0, i32 0), i8* %4)
+    %fmt.addr = alloca ptr, align 4
+    %ap = alloca ptr, align 4
+    %aq = alloca ptr, align 4
+    %s = alloca ptr, align 4
+    store ptr %fmt, ptr %fmt.addr, align 4
+    %ap1 = bitcast ptr %ap to ptr
+    call void @llvm.va_start(ptr %ap1)
+    %0 = bitcast ptr %aq to ptr
+    %1 = bitcast ptr %ap to ptr
+    call void @llvm.va_copy(ptr %0, ptr %1)
+    %argp.cur = load ptr, ptr %aq, align 4
+    %argp.next = getelementptr inbounds i8, ptr %argp.cur, i32 4
+    store ptr %argp.next, ptr %aq, align 4
+    %2 = bitcast ptr %argp.cur to ptr
+    %3 = load ptr, ptr %2, align 4
+    store ptr %3, ptr %s, align 4
+    %4 = load ptr, ptr %s, align 4
+    %call = call i32 (ptr, ...) @printf(ptr @.str, ptr %4)
     ret void
   }
 

diff --git a/llvm/test/CodeGen/Mips/hf16call32.ll b/llvm/test/CodeGen/Mips/hf16call32.ll
index d44224f57d107..881ad12d65a21 100644
--- a/llvm/test/CodeGen/Mips/hf16call32.ll
+++ b/llvm/test/CodeGen/Mips/hf16call32.ll
@@ -33,29 +33,29 @@ entry:
   store float 1.000000e+00, ptr @y, align 4
   store double 1.000000e+00, ptr @xd, align 8
   store double 1.000000e+00, ptr @yd, align 8
-  store float 1.000000e+00, ptr getelementptr inbounds ({ float, float }, ptr @xy, i32 0, i32 0)
+  store float 1.000000e+00, ptr @xy
   store float 0.000000e+00, ptr getelementptr inbounds ({ float, float }, ptr @xy, i32 0, i32 1)
-  store double 1.000000e+00, ptr getelementptr inbounds ({ double, double }, ptr @xyd, i32 0, i32 0)
+  store double 1.000000e+00, ptr @xyd
   store double 0.000000e+00, ptr getelementptr inbounds ({ double, double }, ptr @xyd, i32 0, i32 1)
   store float 1.000000e+00, ptr @ret_sf, align 4
   store double 1.000000e+00, ptr @ret_df, align 8
-  store float 1.000000e+00, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 0)
+  store float 1.000000e+00, ptr @ret_sc
   store float 0.000000e+00, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 1)
-  store double 1.000000e+00, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 0)
+  store double 1.000000e+00, ptr @ret_dc
   store double 0.000000e+00, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 1)
   store float 0.000000e+00, ptr @lx, align 4
   store float 0.000000e+00, ptr @ly, align 4
   store double 0.000000e+00, ptr @lxd, align 8
   store double 0.000000e+00, ptr @lyd, align 8
-  store float 0.000000e+00, ptr getelementptr inbounds ({ float, float }, ptr @lxy, i32 0, i32 0)
+  store float 0.000000e+00, ptr @lxy
   store float 0.000000e+00, ptr getelementptr inbounds ({ float, float }, ptr @lxy, i32 0, i32 1)
-  store double 0.000000e+00, ptr getelementptr inbounds ({ double, double }, ptr @lxyd, i32 0, i32 0)
+  store double 0.000000e+00, ptr @lxyd
   store double 0.000000e+00, ptr getelementptr inbounds ({ double, double }, ptr @lxyd, i32 0, i32 1)
   store float 0.000000e+00, ptr @lret_sf, align 4
   store double 0.000000e+00, ptr @lret_df, align 8
-  store float 0.000000e+00, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 0)
+  store float 0.000000e+00, ptr @lret_sc
   store float 0.000000e+00, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 1)
-  store double 0.000000e+00, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 0)
+  store double 0.000000e+00, ptr @lret_dc
   store double 0.000000e+00, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 1)
   ret void
 }
@@ -599,32 +599,32 @@ land.end198:                                      ; preds = %land.rhs195, %land.
   %land.ext199 = zext i1 %214 to i32
   %call200 = call i32 (ptr, ...) @printf(ptr @.str2, double %202, double %203, double %204, double %205, double %206, double %207, i32 %land.ext199)
   call void @clear()
-  store float 4.500000e+00, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 0)
+  store float 4.500000e+00, ptr @ret_sc
   store float 7.000000e+00, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 1)
   %call201 = call { float, float } @sc_v()
   %215 = extractvalue { float, float } %call201, 0
   %216 = extractvalue { float, float } %call201, 1
-  store float %215, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 0)
+  store float %215, ptr @lret_sc
   store float %216, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 1)
-  %ret_sc.real = load float, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 0)
+  %ret_sc.real = load float, ptr @ret_sc
   %ret_sc.imag = load float, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 1)
   %conv202 = fpext float %ret_sc.real to double
   %conv203 = fpext float %ret_sc.imag to double
-  %ret_sc.real204 = load float, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 0)
+  %ret_sc.real204 = load float, ptr @ret_sc
   %ret_sc.imag205 = load float, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 1)
   %conv206 = fpext float %ret_sc.real204 to double
   %conv207 = fpext float %ret_sc.imag205 to double
-  %lret_sc.real = load float, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 0)
+  %lret_sc.real = load float, ptr @lret_sc
   %lret_sc.imag = load float, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 1)
   %conv208 = fpext float %lret_sc.real to double
   %conv209 = fpext float %lret_sc.imag to double
-  %lret_sc.real210 = load float, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 0)
+  %lret_sc.real210 = load float, ptr @lret_sc
   %lret_sc.imag211 = load float, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 1)
   %conv212 = fpext float %lret_sc.real210 to double
   %conv213 = fpext float %lret_sc.imag211 to double
-  %ret_sc.real214 = load float, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 0)
+  %ret_sc.real214 = load float, ptr @ret_sc
   %ret_sc.imag215 = load float, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 1)
-  %lret_sc.real216 = load float, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 0)
+  %lret_sc.real216 = load float, ptr @lret_sc
   %lret_sc.imag217 = load float, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 1)
   %cmp.r = fcmp oeq float %ret_sc.real214, %lret_sc.real216
   %cmp.i = fcmp oeq float %ret_sc.imag215, %lret_sc.imag217
@@ -633,27 +633,27 @@ land.end198:                                      ; preds = %land.rhs195, %land.
   %call219 = call i32 (ptr, ...) @printf(ptr @.str3, double %conv202, double %conv207, double %conv208, double %conv213, i32 %conv218)
   call void @clear()
   store float 0x3FF7A99300000000, ptr @lx, align 4
-  store float 4.500000e+00, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 0)
+  store float 4.500000e+00, ptr @ret_sc
   store float 7.000000e+00, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 1)
   %217 = load float, ptr @lx, align 4
   %call220 = call { float, float } @sc_sf(float %217)
   %218 = extractvalue { float, float } %call220, 0
   %219 = extractvalue { float, float } %call220, 1
-  store float %218, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 0)
+  store float %218, ptr @lret_sc
   store float %219, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 1)
-  %ret_sc.real221 = load float, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 0)
+  %ret_sc.real221 = load float, ptr @ret_sc
   %ret_sc.imag222 = load float, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 1)
   %conv223 = fpext float %ret_sc.real221 to double
   %conv224 = fpext float %ret_sc.imag222 to double
-  %ret_sc.real225 = load float, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 0)
+  %ret_sc.real225 = load float, ptr @ret_sc
   %ret_sc.imag226 = load float, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 1)
   %conv227 = fpext float %ret_sc.real225 to double
   %conv228 = fpext float %ret_sc.imag226 to double
-  %lret_sc.real229 = load float, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 0)
+  %lret_sc.real229 = load float, ptr @lret_sc
   %lret_sc.imag230 = load float, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 1)
   %conv231 = fpext float %lret_sc.real229 to double
   %conv232 = fpext float %lret_sc.imag230 to double
-  %lret_sc.real233 = load float, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 0)
+  %lret_sc.real233 = load float, ptr @lret_sc
   %lret_sc.imag234 = load float, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 1)
   %conv235 = fpext float %lret_sc.real233 to double
   %conv236 = fpext float %lret_sc.imag234 to double
@@ -661,9 +661,9 @@ land.end198:                                      ; preds = %land.rhs195, %land.
   %conv237 = fpext float %220 to double
   %221 = load float, ptr @lx, align 4
   %conv238 = fpext float %221 to double
-  %ret_sc.real239 = load float, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 0)
+  %ret_sc.real239 = load float, ptr @ret_sc
   %ret_sc.imag240 = load float, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 1)
-  %lret_sc.real241 = load float, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 0)
+  %lret_sc.real241 = load float, ptr @lret_sc
   %lret_sc.imag242 = load float, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 1)
   %cmp.r243 = fcmp oeq float %ret_sc.real239, %lret_sc.real241
   %cmp.i244 = fcmp oeq float %ret_sc.imag240, %lret_sc.imag242
@@ -681,24 +681,24 @@ land.end250:                                      ; preds = %land.rhs247, %land.
   %land.ext251 = zext i1 %224 to i32
   %call252 = call i32 (ptr, ...) @printf(ptr @.str4, double %conv223, double %conv228, double %conv231, double %conv236, double %conv237, double %conv238, i32 %land.ext251)
   call void @clear()
-  store double 1.234500e+03, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 0)
+  store double 1.234500e+03, ptr @ret_dc
   store double 7.677000e+03, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 1)
   %call253 = call { double, double } @dc_v()
   %225 = extractvalue { double, double } %call253, 0
   %226 = extractvalue { double, double } %call253, 1
-  store double %225, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 0)
+  store double %225, ptr @lret_dc
   store double %226, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 1)
-  %ret_dc.real = load double, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 0)
+  %ret_dc.real = load double, ptr @ret_dc
   %ret_dc.imag = load double, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 1)
-  %ret_dc.real254 = load double, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 0)
+  %ret_dc.real254 = load double, ptr @ret_dc
   %ret_dc.imag255 = load double, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 1)
-  %lret_dc.real = load double, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 0)
+  %lret_dc.real = load double, ptr @lret_dc
   %lret_dc.imag = load double, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 1)
-  %lret_dc.real256 = load double, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 0)
+  %lret_dc.real256 = load double, ptr @lret_dc
   %lret_dc.imag257 = load double, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 1)
-  %ret_dc.real258 = load double, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 0)
+  %ret_dc.real258 = load double, ptr @ret_dc
   %ret_dc.imag259 = load double, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 1)
-  %lret_dc.real260 = load double, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 0)
+  %lret_dc.real260 = load double, ptr @lret_dc
   %lret_dc.imag261 = load double, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 1)
   %cmp.r262 = fcmp oeq double %ret_dc.real258, %lret_dc.real260
   %cmp.i263 = fcmp oeq double %ret_dc.imag259, %lret_dc.imag261
@@ -707,29 +707,29 @@ land.end250:                                      ; preds = %land.rhs247, %land.
   %call266 = call i32 (ptr, ...) @printf(ptr @.str3, double %ret_dc.real, double %ret_dc.imag255, double %lret_dc.real, double %lret_dc.imag257, i32 %conv265)
   call void @clear()
   store double 0x40AAF6F532617C1C, ptr @lxd, align 8
-  store double 4.444500e+03, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 0)
+  store double 4.444500e+03, ptr @ret_dc
   store double 7.888000e+03, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 1)
   %227 = load float, ptr @lx, align 4
   %call267 = call { double, double } @dc_sf(float %227)
   %228 = extractvalue { double, double } %call267, 0
   %229 = extractvalue { double, double } %call267, 1
-  store double %228, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 0)
+  store double %228, ptr @lret_dc
   store double %229, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 1)
-  %ret_dc.real268 = load double, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 0)
+  %ret_dc.real268 = load double, ptr @ret_dc
   %ret_dc.imag269 = load double, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 1)
-  %ret_dc.real270 = load double, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 0)
+  %ret_dc.real270 = load double, ptr @ret_dc
   %ret_dc.imag271 = load double, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 1)
-  %lret_dc.real272 = load double, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 0)
+  %lret_dc.real272 = load double, ptr @lret_dc
   %lret_dc.imag273 = load double, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 1)
-  %lret_dc.real274 = load double, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 0)
+  %lret_dc.real274 = load double, ptr @lret_dc
   %lret_dc.imag275 = load double, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 1)
   %230 = load float, ptr @x, align 4
   %conv276 = fpext float %230 to double
   %231 = load float, ptr @lx, align 4
   %conv277 = fpext float %231 to double
-  %ret_dc.real278 = load double, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 0)
+  %ret_dc.real278 = load double, ptr @ret_dc
   %ret_dc.imag279 = load double, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 1)
-  %lret_dc.real280 = load double, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 0)
+  %lret_dc.real280 = load double, ptr @lret_dc
   %lret_dc.imag281 = load double, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 1)
   %cmp.r282 = fcmp oeq double %ret_dc.real278, %lret_dc.real280
   %cmp.i283 = fcmp oeq double %ret_dc.imag279, %lret_dc.imag281

diff --git a/llvm/test/CodeGen/Mips/hfptrcall.ll b/llvm/test/CodeGen/Mips/hfptrcall.ll
index c178b1e26cdce..b4749058ba762 100644
--- a/llvm/test/CodeGen/Mips/hfptrcall.ll
+++ b/llvm/test/CodeGen/Mips/hfptrcall.ll
@@ -80,13 +80,13 @@ entry:
   %call4 = call { float, float } %4()
   %5 = extractvalue { float, float } %call4, 0
   %6 = extractvalue { float, float } %call4, 1
-  store float %5, ptr getelementptr inbounds ({ float, float }, ptr @xy, i32 0, i32 0)
+  store float %5, ptr @xy
   store float %6, ptr getelementptr inbounds ({ float, float }, ptr @xy, i32 0, i32 1)
-  %xy.real = load float, ptr getelementptr inbounds ({ float, float }, ptr @xy, i32 0, i32 0)
+  %xy.real = load float, ptr @xy
   %xy.imag = load float, ptr getelementptr inbounds ({ float, float }, ptr @xy, i32 0, i32 1)
   %conv5 = fpext float %xy.real to double
   %conv6 = fpext float %xy.imag to double
-  %xy.real7 = load float, ptr getelementptr inbounds ({ float, float }, ptr @xy, i32 0, i32 0)
+  %xy.real7 = load float, ptr @xy
   %xy.imag8 = load float, ptr getelementptr inbounds ({ float, float }, ptr @xy, i32 0, i32 1)
   %conv9 = fpext float %xy.real7 to double
   %conv10 = fpext float %xy.imag8 to double
@@ -95,11 +95,11 @@ entry:
   %call12 = call { double, double } %7()
   %8 = extractvalue { double, double } %call12, 0
   %9 = extractvalue { double, double } %call12, 1
-  store double %8, ptr getelementptr inbounds ({ double, double }, ptr @xyd, i32 0, i32 0)
+  store double %8, ptr @xyd
   store double %9, ptr getelementptr inbounds ({ double, double }, ptr @xyd, i32 0, i32 1)
-  %xyd.real = load double, ptr getelementptr inbounds ({ double, double }, ptr @xyd, i32 0, i32 0)
+  %xyd.real = load double, ptr @xyd
   %xyd.imag = load double, ptr getelementptr inbounds ({ double, double }, ptr @xyd, i32 0, i32 1)
-  %xyd.real13 = load double, ptr getelementptr inbounds ({ double, double }, ptr @xyd, i32 0, i32 0)
+  %xyd.real13 = load double, ptr @xyd
   %xyd.imag14 = load double, ptr getelementptr inbounds ({ double, double }, ptr @xyd, i32 0, i32 1)
   %call15 = call i32 (ptr, ...) @printf(ptr @.str1, double %xyd.real, double %xyd.imag14)
   ret i32 0

diff --git a/llvm/test/CodeGen/Mips/mips16_fpret.ll b/llvm/test/CodeGen/Mips/mips16_fpret.ll
index ca9aa84fceea1..2f68a8b96039d 100644
--- a/llvm/test/CodeGen/Mips/mips16_fpret.ll
+++ b/llvm/test/CodeGen/Mips/mips16_fpret.ll
@@ -34,7 +34,7 @@ entry:
 define { float, float } @foocx()  {
 entry:
   %retval = alloca { float, float }, align 4
-  %cx.real = load float, ptr getelementptr inbounds ({ float, float }, ptr @cx, i32 0, i32 0)
+  %cx.real = load float, ptr @cx
   %cx.imag = load float, ptr getelementptr inbounds ({ float, float }, ptr @cx, i32 0, i32 1)
   %real = getelementptr inbounds { float, float }, ptr %retval, i32 0, i32 0
   %imag = getelementptr inbounds { float, float }, ptr %retval, i32 0, i32 1
@@ -53,7 +53,7 @@ entry:
 define { double, double } @foodcx()  {
 entry:
   %retval = alloca { double, double }, align 8
-  %dcx.real = load double, ptr getelementptr inbounds ({ double, double }, ptr @dcx, i32 0, i32 0)
+  %dcx.real = load double, ptr @dcx
   %dcx.imag = load double, ptr getelementptr inbounds ({ double, double }, ptr @dcx, i32 0, i32 1)
   %real = getelementptr inbounds { double, double }, ptr %retval, i32 0, i32 0
   %imag = getelementptr inbounds { double, double }, ptr %retval, i32 0, i32 1

diff --git a/llvm/test/CodeGen/Mips/msa/emergency-spill.mir b/llvm/test/CodeGen/Mips/msa/emergency-spill.mir
index 9aefd3a7ddf98..e1c7b2158d617 100644
--- a/llvm/test/CodeGen/Mips/msa/emergency-spill.mir
+++ b/llvm/test/CodeGen/Mips/msa/emergency-spill.mir
@@ -12,59 +12,59 @@
     %a.addr = alloca <16 x i8>, align 16
     %b.addr = alloca <16 x i8>, align 16
     %c.addr = alloca i32, align 4
-    %g = alloca <16 x i8>*, align 8
-    %d = alloca i8*, align 8
-    %0 = bitcast <16 x i8>* %a to { i64, i64 }*
-    %1 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %0, i32 0, i32 0
-    store i64 %a.coerce0, i64* %1, align 16
-    %2 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %0, i32 0, i32 1
-    store i64 %a.coerce1, i64* %2, align 8
-    %a1 = load <16 x i8>, <16 x i8>* %a, align 16
-    %3 = bitcast <16 x i8>* %b to { i64, i64 }*
-    %4 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %3, i32 0, i32 0
-    store i64 %b.coerce0, i64* %4, align 16
-    %5 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %3, i32 0, i32 1
-    store i64 %b.coerce1, i64* %5, align 8
-    %b2 = load <16 x i8>, <16 x i8>* %b, align 16
-    store <16 x i8> %a1, <16 x i8>* %a.addr, align 16
-    store <16 x i8> %b2, <16 x i8>* %b.addr, align 16
-    store i32 %c, i32* %c.addr, align 4
+    %g = alloca ptr, align 8
+    %d = alloca ptr, align 8
+    %0 = bitcast ptr %a to ptr
+    %1 = getelementptr inbounds { i64, i64 }, ptr %0, i32 0, i32 0
+    store i64 %a.coerce0, ptr %1, align 16
+    %2 = getelementptr inbounds { i64, i64 }, ptr %0, i32 0, i32 1
+    store i64 %a.coerce1, ptr %2, align 8
+    %a1 = load <16 x i8>, ptr %a, align 16
+    %3 = bitcast ptr %b to ptr
+    %4 = getelementptr inbounds { i64, i64 }, ptr %3, i32 0, i32 0
+    store i64 %b.coerce0, ptr %4, align 16
+    %5 = getelementptr inbounds { i64, i64 }, ptr %3, i32 0, i32 1
+    store i64 %b.coerce1, ptr %5, align 8
+    %b2 = load <16 x i8>, ptr %b, align 16
+    store <16 x i8> %a1, ptr %a.addr, align 16
+    store <16 x i8> %b2, ptr %b.addr, align 16
+    store i32 %c, ptr %c.addr, align 4
     %6 = alloca i8, i64 6400, align 16
-    %7 = bitcast i8* %6 to <16 x i8>*
-    store <16 x i8>* %7, <16 x i8>** %g, align 8
-    %8 = load <16 x i8>*, <16 x i8>** %g, align 8
-    call void @h(<16 x i8>* %b.addr, <16 x i8>* %8)
-    %9 = load <16 x i8>*, <16 x i8>** %g, align 8
-    %10 = bitcast <16 x i8>* %9 to i8*
-    store i8* %10, i8** %d, align 8
-    %11 = load <16 x i8>, <16 x i8>* %a.addr, align 16
-    %12 = load i8*, i8** %d, align 8
-    %arrayidx = getelementptr inbounds i8, i8* %12, i64 0
-    %13 = load i8, i8* %arrayidx, align 1
+    %7 = bitcast ptr %6 to ptr
+    store ptr %7, ptr %g, align 8
+    %8 = load ptr, ptr %g, align 8
+    call void @h(ptr %b.addr, ptr %8)
+    %9 = load ptr, ptr %g, align 8
+    %10 = bitcast ptr %9 to ptr
+    store ptr %10, ptr %d, align 8
+    %11 = load <16 x i8>, ptr %a.addr, align 16
+    %12 = load ptr, ptr %d, align 8
+    %arrayidx = getelementptr inbounds i8, ptr %12, i64 0
+    %13 = load i8, ptr %arrayidx, align 1
     %conv = sext i8 %13 to i32
     %14 = call <16 x i8> @llvm.mips.fill.b(i32 %conv)
     %add = add <16 x i8> %11, %14
-    %15 = load i8*, i8** %d, align 8
-    %arrayidx3 = getelementptr inbounds i8, i8* %15, i64 1
-    %16 = load i8, i8* %arrayidx3, align 1
+    %15 = load ptr, ptr %d, align 8
+    %arrayidx3 = getelementptr inbounds i8, ptr %15, i64 1
+    %16 = load i8, ptr %arrayidx3, align 1
     %conv4 = sext i8 %16 to i32
     %17 = call <16 x i8> @llvm.mips.fill.b(i32 %conv4)
     %add5 = add <16 x i8> %add, %17
-    %18 = load <16 x i8>, <16 x i8>* %b.addr, align 16
+    %18 = load <16 x i8>, ptr %b.addr, align 16
     %add6 = add <16 x i8> %18, %add5
-    store <16 x i8> %add6, <16 x i8>* %b.addr, align 16
-    %19 = load <16 x i8>, <16 x i8>* %b.addr, align 16
-    store <16 x i8> %19, <16 x i8>* %retval, align 16
-    %20 = bitcast <16 x i8>* %retval to { i64, i64 }*
-    %21 = load { i64, i64 }, { i64, i64 }* %20, align 16
+    store <16 x i8> %add6, ptr %b.addr, align 16
+    %19 = load <16 x i8>, ptr %b.addr, align 16
+    store <16 x i8> %19, ptr %retval, align 16
+    %20 = bitcast ptr %retval to ptr
+    %21 = load { i64, i64 }, ptr %20, align 16
     ret { i64, i64 } %21
   }
 
-  declare void @h(<16 x i8>*, <16 x i8>*)
+  declare void @h(ptr, ptr)
 
   declare <16 x i8> @llvm.mips.fill.b(i32)
 
-  declare void @llvm.stackprotector(i8*, i8**)
+  declare void @llvm.stackprotector(ptr, ptr)
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/mulull.ll b/llvm/test/CodeGen/Mips/mulull.ll
index fdcb68d036f85..749adfa055729 100644
--- a/llvm/test/CodeGen/Mips/mulull.ll
+++ b/llvm/test/CodeGen/Mips/mulull.ll
@@ -7,10 +7,10 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i64, i64* @iiii, align 8
-  %1 = load i64, i64* @jjjj, align 8
+  %0 = load i64, ptr @iiii, align 8
+  %1 = load i64, ptr @jjjj, align 8
   %mul = mul nsw i64 %1, %0
-  store i64 %mul, i64* @kkkk, align 8
+  store i64 %mul, ptr @kkkk, align 8
 ; 16:	multu	${{[0-9]+}}, ${{[0-9]+}}
 ; 16: 	mfhi	${{[0-9]+}}
 ; 16:	mult	${{[0-9]+}}, ${{[0-9]+}}

diff --git a/llvm/test/CodeGen/NVPTX/addrspacecast.ll b/llvm/test/CodeGen/NVPTX/addrspacecast.ll
index ff51a67071dab..b680490ac5b12 100644
--- a/llvm/test/CodeGen/NVPTX/addrspacecast.ll
+++ b/llvm/test/CodeGen/NVPTX/addrspacecast.ll
@@ -6,95 +6,95 @@
 ; RUN: %if ptxas %{ llc -O0 < %s -march=nvptx64 -mcpu=sm_20 --nvptx-short-ptr | %ptxas-verify %}
 
 ; ALL-LABEL: conv1
-define i32 @conv1(i32 addrspace(1)* %ptr) {
+define i32 @conv1(ptr addrspace(1) %ptr) {
 ; G32: cvta.global.u32
 ; ALL-NOT: cvt.u64.u32
 ; G64: cvta.global.u64
 ; ALL: ld.u32
-  %genptr = addrspacecast i32 addrspace(1)* %ptr to i32*
-  %val = load i32, i32* %genptr
+  %genptr = addrspacecast ptr addrspace(1) %ptr to ptr
+  %val = load i32, ptr %genptr
   ret i32 %val
 }
 
 ; ALL-LABEL: conv2
-define i32 @conv2(i32 addrspace(3)* %ptr) {
+define i32 @conv2(ptr addrspace(3) %ptr) {
 ; CLS32: cvta.shared.u32
 ; PTRCONV: cvt.u64.u32
 ; NOPTRCONV-NOT: cvt.u64.u32
 ; CLS64: cvta.shared.u64
 ; ALL: ld.u32
-  %genptr = addrspacecast i32 addrspace(3)* %ptr to i32*
-  %val = load i32, i32* %genptr
+  %genptr = addrspacecast ptr addrspace(3) %ptr to ptr
+  %val = load i32, ptr %genptr
   ret i32 %val
 }
 
 ; ALL-LABEL: conv3
-define i32 @conv3(i32 addrspace(4)* %ptr) {
+define i32 @conv3(ptr addrspace(4) %ptr) {
 ; CLS32: cvta.const.u32
 ; PTRCONV: cvt.u64.u32
 ; NOPTRCONV-NOT: cvt.u64.u32
 ; CLS64: cvta.const.u64
 ; ALL: ld.u32
-  %genptr = addrspacecast i32 addrspace(4)* %ptr to i32*
-  %val = load i32, i32* %genptr
+  %genptr = addrspacecast ptr addrspace(4) %ptr to ptr
+  %val = load i32, ptr %genptr
   ret i32 %val
 }
 
 ; ALL-LABEL: conv4
-define i32 @conv4(i32 addrspace(5)* %ptr) {
+define i32 @conv4(ptr addrspace(5) %ptr) {
 ; CLS32: cvta.local.u32
 ; PTRCONV: cvt.u64.u32
 ; NOPTRCONV-NOT: cvt.u64.u32
 ; CLS64: cvta.local.u64
 ; ALL: ld.u32
-  %genptr = addrspacecast i32 addrspace(5)* %ptr to i32*
-  %val = load i32, i32* %genptr
+  %genptr = addrspacecast ptr addrspace(5) %ptr to ptr
+  %val = load i32, ptr %genptr
   ret i32 %val
 }
 
 ; ALL-LABEL: conv5
-define i32 @conv5(i32* %ptr) {
+define i32 @conv5(ptr %ptr) {
 ; CLS32: cvta.to.global.u32
 ; ALL-NOT: cvt.u64.u32
 ; CLS64: cvta.to.global.u64
 ; ALL: ld.global.u32
-  %specptr = addrspacecast i32* %ptr to i32 addrspace(1)*
-  %val = load i32, i32 addrspace(1)* %specptr
+  %specptr = addrspacecast ptr %ptr to ptr addrspace(1)
+  %val = load i32, ptr addrspace(1) %specptr
   ret i32 %val
 }
 
 ; ALL-LABEL: conv6
-define i32 @conv6(i32* %ptr) {
+define i32 @conv6(ptr %ptr) {
 ; CLS32: cvta.to.shared.u32
 ; CLS64: cvta.to.shared.u64
 ; PTRCONV: cvt.u32.u64
 ; NOPTRCONV-NOT: cvt.u32.u64
 ; ALL: ld.shared.u32
-  %specptr = addrspacecast i32* %ptr to i32 addrspace(3)*
-  %val = load i32, i32 addrspace(3)* %specptr
+  %specptr = addrspacecast ptr %ptr to ptr addrspace(3)
+  %val = load i32, ptr addrspace(3) %specptr
   ret i32 %val
 }
 
 ; ALL-LABEL: conv7
-define i32 @conv7(i32* %ptr) {
+define i32 @conv7(ptr %ptr) {
 ; CLS32: cvta.to.const.u32
 ; CLS64: cvta.to.const.u64
 ; PTRCONV: cvt.u32.u64
 ; NOPTRCONV-NOT: cvt.u32.u64
 ; ALL: ld.const.u32
-  %specptr = addrspacecast i32* %ptr to i32 addrspace(4)*
-  %val = load i32, i32 addrspace(4)* %specptr
+  %specptr = addrspacecast ptr %ptr to ptr addrspace(4)
+  %val = load i32, ptr addrspace(4) %specptr
   ret i32 %val
 }
 
 ; ALL-LABEL: conv8
-define i32 @conv8(i32* %ptr) {
+define i32 @conv8(ptr %ptr) {
 ; CLS32: cvta.to.local.u32
 ; CLS64: cvta.to.local.u64
 ; PTRCONV: cvt.u32.u64
 ; NOPTRCONV-NOT: cvt.u32.u64
 ; ALL: ld.local.u32
-  %specptr = addrspacecast i32* %ptr to i32 addrspace(5)*
-  %val = load i32, i32 addrspace(5)* %specptr
+  %specptr = addrspacecast ptr %ptr to ptr addrspace(5)
+  %val = load i32, ptr addrspace(5) %specptr
   ret i32 %val
 }

diff --git a/llvm/test/CodeGen/NVPTX/bf16x2-instructions.ll b/llvm/test/CodeGen/NVPTX/bf16x2-instructions.ll
index c08f16c2644de..9bde89cdf044f 100644
--- a/llvm/test/CodeGen/NVPTX/bf16x2-instructions.ll
+++ b/llvm/test/CodeGen/NVPTX/bf16x2-instructions.ll
@@ -144,9 +144,9 @@ define <2 x bfloat> @test_fneg(<2 x bfloat> %a) #0 {
 ; CHECK-DAG:    ld.b32          [[E:%r[0-9]+]], [%[[A]]]
 ; CHECK-DAG:    st.b32          [%[[B]]], [[E]];
 ; CHECK:        ret;
-define void @test_ldst_v2bf16(<2 x bfloat>* %a, <2 x bfloat>* %b) {
-  %t1 = load <2 x bfloat>, <2 x bfloat>* %a
-  store <2 x bfloat> %t1, <2 x bfloat>* %b, align 16
+define void @test_ldst_v2bf16(ptr %a, ptr %b) {
+  %t1 = load <2 x bfloat>, ptr %a
+  store <2 x bfloat> %t1, ptr %b, align 16
   ret void
 }
 
@@ -161,9 +161,9 @@ define void @test_ldst_v2bf16(<2 x bfloat>* %a, <2 x bfloat>* %b) {
 ; CHECK-DAG:    st.u32          [%[[B]]],
 ; CHECK-DAG:    st.b16          [%[[B]]+4],
 ; CHECK:        ret;
-define void @test_ldst_v3bf16(<3 x bfloat>* %a, <3 x bfloat>* %b) {
-  %t1 = load <3 x bfloat>, <3 x bfloat>* %a
-  store <3 x bfloat> %t1, <3 x bfloat>* %b, align 16
+define void @test_ldst_v3bf16(ptr %a, ptr %b) {
+  %t1 = load <3 x bfloat>, ptr %a
+  store <3 x bfloat> %t1, ptr %b, align 16
   ret void
 }
 

diff --git a/llvm/test/CodeGen/NVPTX/ld-addrspace.ll b/llvm/test/CodeGen/NVPTX/ld-addrspace.ll
index 8900b58f6603f..019cc6dd5e733 100644
--- a/llvm/test/CodeGen/NVPTX/ld-addrspace.ll
+++ b/llvm/test/CodeGen/NVPTX/ld-addrspace.ll
@@ -7,157 +7,157 @@
 
 
 ;; i8
-define i8 @ld_global_i8(i8 addrspace(1)* %ptr) {
+define i8 @ld_global_i8(ptr addrspace(1) %ptr) {
 ; ALL-LABEL: ld_global_i8
 ; G32: ld.global.u8 %{{.*}}, [%r{{[0-9]+}}]
 ; G64: ld.global.u8 %{{.*}}, [%rd{{[0-9]+}}]
 ; ALL: ret
-  %a = load i8, i8 addrspace(1)* %ptr
+  %a = load i8, ptr addrspace(1) %ptr
   ret i8 %a
 }
-define i8 @ld_shared_i8(i8 addrspace(3)* %ptr) {
+define i8 @ld_shared_i8(ptr addrspace(3) %ptr) {
 ; ALL-LABEL: ld_shared_i8
 ; LS32: ld.shared.u8 %{{.*}}, [%r{{[0-9]+}}]
 ; LS64: ld.shared.u8 %{{.*}}, [%rd{{[0-9]+}}]
 ; ALL: ret
-  %a = load i8, i8 addrspace(3)* %ptr
+  %a = load i8, ptr addrspace(3) %ptr
   ret i8 %a
 }
-define i8 @ld_local_i8(i8 addrspace(5)* %ptr) {
+define i8 @ld_local_i8(ptr addrspace(5) %ptr) {
 ; ALL-LABEL: ld_local_i8
 ; LS32: ld.local.u8 %{{.*}}, [%r{{[0-9]+}}]
 ; LS64: ld.local.u8 %{{.*}}, [%rd{{[0-9]+}}]
 ; ALL: ret
-  %a = load i8, i8 addrspace(5)* %ptr
+  %a = load i8, ptr addrspace(5) %ptr
   ret i8 %a
 }
 
 ;; i16
-define i16 @ld_global_i16(i16 addrspace(1)* %ptr) {
+define i16 @ld_global_i16(ptr addrspace(1) %ptr) {
 ; ALL-LABEL: ld_global_i16
 ; G32: ld.global.u16 %{{.*}}, [%r{{[0-9]+}}]
 ; G64: ld.global.u16 %{{.*}}, [%rd{{[0-9]+}}]
 ; ALL: ret
-  %a = load i16, i16 addrspace(1)* %ptr
+  %a = load i16, ptr addrspace(1) %ptr
   ret i16 %a
 }
-define i16 @ld_shared_i16(i16 addrspace(3)* %ptr) {
+define i16 @ld_shared_i16(ptr addrspace(3) %ptr) {
 ; ALL-LABEL: ld_shared_i16
 ; LS32: ld.shared.u16 %{{.*}}, [%r{{[0-9]+}}]
 ; LS64: ld.shared.u16 %{{.*}}, [%rd{{[0-9]+}}]
 ; ALL: ret
-  %a = load i16, i16 addrspace(3)* %ptr
+  %a = load i16, ptr addrspace(3) %ptr
   ret i16 %a
 }
-define i16 @ld_local_i16(i16 addrspace(5)* %ptr) {
+define i16 @ld_local_i16(ptr addrspace(5) %ptr) {
 ; ALL-LABEL: ld_local_i16
 ; LS32: ld.local.u16 %{{.*}}, [%r{{[0-9]+}}]
 ; LS64: ld.local.u16 %{{.*}}, [%rd{{[0-9]+}}]
 ; ALL: ret
-  %a = load i16, i16 addrspace(5)* %ptr
+  %a = load i16, ptr addrspace(5) %ptr
   ret i16 %a
 }
 
 ;; i32
-define i32 @ld_global_i32(i32 addrspace(1)* %ptr) {
+define i32 @ld_global_i32(ptr addrspace(1) %ptr) {
 ; ALL-LABEL: ld_global_i32
 ; G32: ld.global.u32 %{{.*}}, [%r{{[0-9]+}}]
 ; G64: ld.global.u32 %{{.*}}, [%rd{{[0-9]+}}]
 ; ALL: ret
-  %a = load i32, i32 addrspace(1)* %ptr
+  %a = load i32, ptr addrspace(1) %ptr
   ret i32 %a
 }
-define i32 @ld_shared_i32(i32 addrspace(3)* %ptr) {
+define i32 @ld_shared_i32(ptr addrspace(3) %ptr) {
 ; ALL-LABEL: ld_shared_i32
 ; LS32: ld.shared.u32 %{{.*}}, [%r{{[0-9]+}}]
 ; LS64: ld.shared.u32 %{{.*}}, [%rd{{[0-9]+}}]
 ; PTX64: ret
-  %a = load i32, i32 addrspace(3)* %ptr
+  %a = load i32, ptr addrspace(3) %ptr
   ret i32 %a
 }
-define i32 @ld_local_i32(i32 addrspace(5)* %ptr) {
+define i32 @ld_local_i32(ptr addrspace(5) %ptr) {
 ; ALL-LABEL: ld_local_i32
 ; LS32: ld.local.u32 %{{.*}}, [%r{{[0-9]+}}]
 ; LS64: ld.local.u32 %{{.*}}, [%rd{{[0-9]+}}]
 ; ALL: ret
-  %a = load i32, i32 addrspace(5)* %ptr
+  %a = load i32, ptr addrspace(5) %ptr
   ret i32 %a
 }
 
 ;; i64
-define i64 @ld_global_i64(i64 addrspace(1)* %ptr) {
+define i64 @ld_global_i64(ptr addrspace(1) %ptr) {
 ; ALL-LABEL: ld_global_i64
 ; G32: ld.global.u64 %{{.*}}, [%r{{[0-9]+}}]
 ; G64: ld.global.u64 %{{.*}}, [%rd{{[0-9]+}}]
 ; ALL: ret
-  %a = load i64, i64 addrspace(1)* %ptr
+  %a = load i64, ptr addrspace(1) %ptr
   ret i64 %a
 }
-define i64 @ld_shared_i64(i64 addrspace(3)* %ptr) {
+define i64 @ld_shared_i64(ptr addrspace(3) %ptr) {
 ; ALL-LABEL: ld_shared_i64
 ; LS32: ld.shared.u64 %{{.*}}, [%r{{[0-9]+}}]
 ; LS64: ld.shared.u64 %{{.*}}, [%rd{{[0-9]+}}]
 ; ALL: ret
-  %a = load i64, i64 addrspace(3)* %ptr
+  %a = load i64, ptr addrspace(3) %ptr
   ret i64 %a
 }
-define i64 @ld_local_i64(i64 addrspace(5)* %ptr) {
+define i64 @ld_local_i64(ptr addrspace(5) %ptr) {
 ; ALL-LABEL: ld_local_i64
 ; LS32: ld.local.u64 %{{.*}}, [%r{{[0-9]+}}]
 ; LS64: ld.local.u64 %{{.*}}, [%rd{{[0-9]+}}]
 ; ALL: ret
-  %a = load i64, i64 addrspace(5)* %ptr
+  %a = load i64, ptr addrspace(5) %ptr
   ret i64 %a
 }
 
 ;; f32
-define float @ld_global_f32(float addrspace(1)* %ptr) {
+define float @ld_global_f32(ptr addrspace(1) %ptr) {
 ; ALL-LABEL: ld_global_f32
 ; G32: ld.global.f32 %{{.*}}, [%r{{[0-9]+}}]
 ; G64: ld.global.f32 %{{.*}}, [%rd{{[0-9]+}}]
 ; ALL: ret
-  %a = load float, float addrspace(1)* %ptr
+  %a = load float, ptr addrspace(1) %ptr
   ret float %a
 }
-define float @ld_shared_f32(float addrspace(3)* %ptr) {
+define float @ld_shared_f32(ptr addrspace(3) %ptr) {
 ; ALL-LABEL: ld_shared_f32
 ; LS32: ld.shared.f32 %{{.*}}, [%r{{[0-9]+}}]
 ; LS64: ld.shared.f32 %{{.*}}, [%rd{{[0-9]+}}]
 ; ALL: ret
-  %a = load float, float addrspace(3)* %ptr
+  %a = load float, ptr addrspace(3) %ptr
   ret float %a
 }
-define float @ld_local_f32(float addrspace(5)* %ptr) {
+define float @ld_local_f32(ptr addrspace(5) %ptr) {
 ; ALL-LABEL: ld_local_f32
 ; LS32: ld.local.f32 %{{.*}}, [%r{{[0-9]+}}]
 ; LS64: ld.local.f32 %{{.*}}, [%rd{{[0-9]+}}]
 ; ALL: ret
-  %a = load float, float addrspace(5)* %ptr
+  %a = load float, ptr addrspace(5) %ptr
   ret float %a
 }
 
 ;; f64
-define double @ld_global_f64(double addrspace(1)* %ptr) {
+define double @ld_global_f64(ptr addrspace(1) %ptr) {
 ; ALL-LABEL: ld_global_f64
 ; G32: ld.global.f64 %{{.*}}, [%r{{[0-9]+}}]
 ; G64: ld.global.f64 %{{.*}}, [%rd{{[0-9]+}}]
 ; ALL: ret
-  %a = load double, double addrspace(1)* %ptr
+  %a = load double, ptr addrspace(1) %ptr
   ret double %a
 }
-define double @ld_shared_f64(double addrspace(3)* %ptr) {
+define double @ld_shared_f64(ptr addrspace(3) %ptr) {
 ; ALL-LABEL: ld_shared_f64
 ; LS32: ld.shared.f64 %{{.*}}, [%r{{[0-9]+}}]
 ; LS64: ld.shared.f64 %{{.*}}, [%rd{{[0-9]+}}]
 ; ALL: ret
-  %a = load double, double addrspace(3)* %ptr
+  %a = load double, ptr addrspace(3) %ptr
   ret double %a
 }
-define double @ld_local_f64(double addrspace(5)* %ptr) {
+define double @ld_local_f64(ptr addrspace(5) %ptr) {
 ; ALL-LABEL: ld_local_f64
 ; LS32: ld.local.f64 %{{.*}}, [%r{{[0-9]+}}]
 ; LS64: ld.local.f64 %{{.*}}, [%rd{{[0-9]+}}]
 ; ALL: ret
-  %a = load double, double addrspace(5)* %ptr
+  %a = load double, ptr addrspace(5) %ptr
   ret double %a
 }

diff --git a/llvm/test/CodeGen/NVPTX/ldu-ldg.ll b/llvm/test/CodeGen/NVPTX/ldu-ldg.ll
index 66f0954c34c83..a449a1b1f713c 100644
--- a/llvm/test/CodeGen/NVPTX/ldu-ldg.ll
+++ b/llvm/test/CodeGen/NVPTX/ldu-ldg.ll
@@ -6,7 +6,7 @@ declare i8 @llvm.nvvm.ldu.global.i.i8.p1(ptr addrspace(1) %ptr, i32 %align)
 declare i16 @llvm.nvvm.ldu.global.i.i16.p1(ptr addrspace(1) %ptr, i32 %align)
 declare i32 @llvm.nvvm.ldu.global.i.i32.p1(ptr addrspace(1) %ptr, i32 %align)
 declare i64 @llvm.nvvm.ldu.global.i.i64.p1(ptr addrspace(1) %ptr, i32 %align)
-declare ptr @llvm.nvvm.ldu.global.p.p1i8(ptr addrspace(1) %ptr, i32 %align)
+declare ptr @llvm.nvvm.ldu.global.p.p1(ptr addrspace(1) %ptr, i32 %align)
 declare float @llvm.nvvm.ldu.global.f.f32.p1(ptr addrspace(1) %ptr, i32 %align)
 declare double @llvm.nvvm.ldu.global.f.f64.p1(ptr addrspace(1) %ptr, i32 %align)
 declare half @llvm.nvvm.ldu.global.f.f16.p1(ptr addrspace(1) %ptr, i32 %align)
@@ -16,7 +16,7 @@ declare i8 @llvm.nvvm.ldg.global.i.i8.p1(ptr addrspace(1) %ptr, i32 %align)
 declare i16 @llvm.nvvm.ldg.global.i.i16.p1(ptr addrspace(1) %ptr, i32 %align)
 declare i32 @llvm.nvvm.ldg.global.i.i32.p1(ptr addrspace(1) %ptr, i32 %align)
 declare i64 @llvm.nvvm.ldg.global.i.i64.p1(ptr addrspace(1) %ptr, i32 %align)
-declare ptr @llvm.nvvm.ldg.global.p.p1i8(ptr addrspace(1) %ptr, i32 %align)
+declare ptr @llvm.nvvm.ldg.global.p.p1(ptr addrspace(1) %ptr, i32 %align)
 declare float @llvm.nvvm.ldg.global.f.f32.p1(ptr addrspace(1) %ptr, i32 %align)
 declare double @llvm.nvvm.ldg.global.f.f64.p1(ptr addrspace(1) %ptr, i32 %align)
 declare half @llvm.nvvm.ldg.global.f.f16.p1(ptr addrspace(1) %ptr, i32 %align)
@@ -53,7 +53,7 @@ define i64 @test_ldu_i64(ptr addrspace(1) %ptr) {
 ; CHECK-LABEL: test_ldu_p
 define ptr @test_ldu_p(ptr addrspace(1) %ptr) {
   ; CHECK: ldu.global.u64
-  %val = tail call ptr @llvm.nvvm.ldu.global.p.p1i8(ptr addrspace(1) %ptr, i32 8)
+  %val = tail call ptr @llvm.nvvm.ldu.global.p.p1(ptr addrspace(1) %ptr, i32 8)
   ret ptr %val
 }
 
@@ -117,7 +117,7 @@ define i64 @test_ldg_i64(ptr addrspace(1) %ptr) {
 ; CHECK-LABEL: test_ldg_p
 define ptr @test_ldg_p(ptr addrspace(1) %ptr) {
   ; CHECK: ld.global.nc.u64
-  %val = tail call ptr @llvm.nvvm.ldg.global.p.p1i8(ptr addrspace(1) %ptr, i32 8)
+  %val = tail call ptr @llvm.nvvm.ldg.global.p.p1(ptr addrspace(1) %ptr, i32 8)
   ret ptr %val
 }
 

diff --git a/llvm/test/CodeGen/NVPTX/noreturn.ll b/llvm/test/CodeGen/NVPTX/noreturn.ll
index 86cec0a37d9bf..e7022d25da651 100644
--- a/llvm/test/CodeGen/NVPTX/noreturn.ll
+++ b/llvm/test/CodeGen/NVPTX/noreturn.ll
@@ -1,7 +1,7 @@
 ; RUN: llc < %s -march=nvptx64 -mattr=+ptx64 -mcpu=sm_30 | FileCheck %s
 ; RUN: %if ptxas %{llc < %s -march=nvptx64 -mattr=+ptx60 -mcpu=sm_30 | %ptxas-verify %}
 
-@function_pointer = addrspace(1) global void (i32)* null
+@function_pointer = addrspace(1) global ptr null
 
 ; CHECK: .func trap_wrapper
 ; CHECK-NEXT: ()
@@ -36,9 +36,9 @@ define void @ignore_kernel_noreturn() #0 {
 ; CHECK: prototype_{{[0-9]+}} : .callprototype (.param .b32 _) _ (.param .b32 _);
 
 define void @callprototype_noreturn(i32) {
-  %fn = load void (i32)*, void (i32)* addrspace(1)* @function_pointer
+  %fn = load ptr, ptr addrspace(1) @function_pointer
   call void %fn(i32 %0) #0
-  %non_void = bitcast void (i32)* %fn to i32 (i32)*
+  %non_void = bitcast ptr %fn to ptr
   %2 = call i32 %non_void(i32 %0) #0
   ret void
 }
@@ -47,5 +47,5 @@ attributes #0 = { noreturn }
 
 !nvvm.annotations = !{!0, !1}
 
-!0 = !{void ()* @ignore_kernel_noreturn, !"kernel", i32 1}
-!1 = !{void (i32)* @callprototype_noreturn, !"kernel", i32 1}
+!0 = !{ptr @ignore_kernel_noreturn, !"kernel", i32 1}
+!1 = !{ptr @callprototype_noreturn, !"kernel", i32 1}

diff --git a/llvm/test/CodeGen/NVPTX/nvvm-reflect-ocl.ll b/llvm/test/CodeGen/NVPTX/nvvm-reflect-ocl.ll
index 2c9ea4742de84..9d383218dce86 100644
--- a/llvm/test/CodeGen/NVPTX/nvvm-reflect-ocl.ll
+++ b/llvm/test/CodeGen/NVPTX/nvvm-reflect-ocl.ll
@@ -12,7 +12,7 @@ declare i32 @__nvvm_reflect_ocl(ptr addrspace(4) noundef)
 ; COMMON-LABEL: @foo
 define i32 @foo(float %a, float %b) {
 ; COMMON-NOT: call i32 @__nvvm_reflect_ocl
-  %reflect = tail call i32 @__nvvm_reflect_ocl(ptr addrspace(4) noundef getelementptr inbounds ([12 x i8], [12 x i8] addrspace(4)* @"$str", i64 0, i64 0))
+  %reflect = tail call i32 @__nvvm_reflect_ocl(ptr addrspace(4) noundef @"$str")
 ; SM20: ret i32 200
 ; SM35: ret i32 350
   ret i32 %reflect

diff --git a/llvm/test/CodeGen/NVPTX/nvvm-reflect-opaque.ll b/llvm/test/CodeGen/NVPTX/nvvm-reflect-opaque.ll
index 9199bf6ae12a9..1cb5c87fae826 100644
--- a/llvm/test/CodeGen/NVPTX/nvvm-reflect-opaque.ll
+++ b/llvm/test/CodeGen/NVPTX/nvvm-reflect-opaque.ll
@@ -14,12 +14,12 @@
 @str = private unnamed_addr addrspace(4) constant [11 x i8] c"__CUDA_FTZ\00"
 
 declare i32 @__nvvm_reflect(ptr)
-declare ptr @llvm.nvvm.ptr.constant.to.gen.p0i8.p4i8(ptr addrspace(4))
+declare ptr @llvm.nvvm.ptr.constant.to.gen.p0.p4(ptr addrspace(4))
 
 ; CHECK-LABEL: @foo
 define float @foo(float %a, float %b) {
 ; CHECK-NOT: call i32 @__nvvm_reflect
-  %ptr = tail call ptr @llvm.nvvm.ptr.constant.to.gen.p0i8.p4i8(ptr addrspace(4) @str)
+  %ptr = tail call ptr @llvm.nvvm.ptr.constant.to.gen.p0.p4(ptr addrspace(4) @str)
   %reflect = tail call i32 @__nvvm_reflect(ptr %ptr)
   %cmp = icmp ugt i32 %reflect, 0
   br i1 %cmp, label %use_mul, label %use_add
@@ -41,15 +41,15 @@ exit:
   ret float %ret
 }
 
-declare i32 @llvm.nvvm.reflect.p0i8(ptr)
+declare i32 @llvm.nvvm.reflect.p0(ptr)
 
 ; CHECK-LABEL: define noundef i32 @intrinsic
 define i32 @intrinsic() {
 ; CHECK-NOT: call i32 @llvm.nvvm.reflect
 ; USE_FTZ_0: ret i32 0
 ; USE_FTZ_1: ret i32 1
-  %ptr = tail call ptr @llvm.nvvm.ptr.constant.to.gen.p0i8.p4i8(ptr addrspace(4) @str)
-  %reflect = tail call i32 @llvm.nvvm.reflect.p0i8(ptr %ptr)
+  %ptr = tail call ptr @llvm.nvvm.ptr.constant.to.gen.p0.p4(ptr addrspace(4) @str)
+  %reflect = tail call i32 @llvm.nvvm.reflect.p0(ptr %ptr)
   ret i32 %reflect
 }
 

diff --git a/llvm/test/CodeGen/NVPTX/short-ptr.ll b/llvm/test/CodeGen/NVPTX/short-ptr.ll
index c6f9106af8b20..3d8fdeca24fbe 100644
--- a/llvm/test/CodeGen/NVPTX/short-ptr.ll
+++ b/llvm/test/CodeGen/NVPTX/short-ptr.ll
@@ -9,36 +9,36 @@
 ; CHECK-DEFAULT: .visible .shared .align 8 .u64 s
 ; CHECK-DEFAULT-32: .visible .shared .align 8 .u32 s
 ; CHECK-SHORT-SHARED: .visible .shared .align 8 .u32 s
-@s = local_unnamed_addr addrspace(3) global i32 addrspace(3)* null, align 8
+@s = local_unnamed_addr addrspace(3) global ptr addrspace(3) null, align 8
 
 ; CHECK-DEFAULT: .visible .const .align 8 .u64 c
 ; CHECK-DEFAULT-32: .visible .const .align 8 .u32 c
 ; CHECK-SHORT-CONST: .visible .const .align 8 .u32 c
-@c = local_unnamed_addr addrspace(4) global i32 addrspace(4)* null, align 8
+@c = local_unnamed_addr addrspace(4) global ptr addrspace(4) null, align 8
 
 declare void @use(i8 %arg);
 
 ; CHECK-DEFAULT: .param .b64 test1_param_0
 ; CHECK-DEFAULT-32: .param .b32 test1_param_0
 ; CHECK-SHORT-LOCAL: .param .b32 test1_param_0
-define void @test1(i8 addrspace(5)* %local) {
+define void @test1(ptr addrspace(5) %local) {
   ; CHECK-DEFAULT: ld.param.u64 %rd{{.*}}, [test1_param_0];
   ; CHECK-DEFAULT-32:  ld.param.u32 %r{{.*}}, [test1_param_0];
   ; CHECK-SHORT-LOCAL: ld.param.u32 %r{{.*}}, [test1_param_0];
-  %v = load i8, i8 addrspace(5)* %local
+  %v = load i8, ptr addrspace(5) %local
   call void @use(i8 %v)
   ret void
 }
 
 define void @test2() {
   %v = alloca i8
-  %cast = addrspacecast i8* %v to i8 addrspace(5)*
+  %cast = addrspacecast ptr %v to ptr addrspace(5)
   ; CHECK-DEFAULT: .param .b64 param0;
   ; CHECK-DEFAULT: st.param.b64
   ; CHECK-DEFAULT-32: .param .b32 param0;
   ; CHECK-DEFAULT-32: st.param.b32
   ; CHECK-SHORT-LOCAL: .param .b32 param0;
   ; CHECK-SHORT-LOCAL: st.param.b32
-  call void @test1(i8 addrspace(5)* %cast)
+  call void @test1(ptr addrspace(5) %cast)
   ret void
 }

diff --git a/llvm/test/CodeGen/NVPTX/st-addrspace.ll b/llvm/test/CodeGen/NVPTX/st-addrspace.ll
index 525a837801073..3159a250cdab4 100644
--- a/llvm/test/CodeGen/NVPTX/st-addrspace.ll
+++ b/llvm/test/CodeGen/NVPTX/st-addrspace.ll
@@ -7,156 +7,156 @@
 
 ;; i8
 ; ALL-LABEL: st_global_i8
-define void @st_global_i8(i8 addrspace(1)* %ptr, i8 %a) {
+define void @st_global_i8(ptr addrspace(1) %ptr, i8 %a) {
 ; G32: st.global.u8 [%r{{[0-9]+}}], %rs{{[0-9]+}}
 ; G64: st.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
 ; ALL: ret
-  store i8 %a, i8 addrspace(1)* %ptr
+  store i8 %a, ptr addrspace(1) %ptr
   ret void
 }
 ; ALL-LABEL: st_shared_i8
-define void @st_shared_i8(i8 addrspace(3)* %ptr, i8 %a) {
+define void @st_shared_i8(ptr addrspace(3) %ptr, i8 %a) {
 ; LS32: st.shared.u8 [%r{{[0-9]+}}], %rs{{[0-9]+}}
 ; LS64: st.shared.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
 ; ALL: ret
-  store i8 %a, i8 addrspace(3)* %ptr
+  store i8 %a, ptr addrspace(3) %ptr
   ret void
 }
 ; ALL-LABEL: st_local_i8
-define void @st_local_i8(i8 addrspace(5)* %ptr, i8 %a) {
+define void @st_local_i8(ptr addrspace(5) %ptr, i8 %a) {
 ; LS32: st.local.u8 [%r{{[0-9]+}}], %rs{{[0-9]+}}
 ; LS64: st.local.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
 ; ALL: ret
-  store i8 %a, i8 addrspace(5)* %ptr
+  store i8 %a, ptr addrspace(5) %ptr
   ret void
 }
 
 ;; i16
 ; ALL-LABEL: st_global_i16
-define void @st_global_i16(i16 addrspace(1)* %ptr, i16 %a) {
+define void @st_global_i16(ptr addrspace(1) %ptr, i16 %a) {
 ; G32: st.global.u16 [%r{{[0-9]+}}], %rs{{[0-9]+}}
 ; G64: st.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
 ; ALL: ret
-  store i16 %a, i16 addrspace(1)* %ptr
+  store i16 %a, ptr addrspace(1) %ptr
   ret void
 }
 ; ALL-LABEL: st_shared_i16
-define void @st_shared_i16(i16 addrspace(3)* %ptr, i16 %a) {
+define void @st_shared_i16(ptr addrspace(3) %ptr, i16 %a) {
 ; LS32: st.shared.u16 [%r{{[0-9]+}}], %rs{{[0-9]+}}
 ; LS64: st.shared.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
 ; ALL: ret
-  store i16 %a, i16 addrspace(3)* %ptr
+  store i16 %a, ptr addrspace(3) %ptr
   ret void
 }
 ; ALL-LABEL: st_local_i16
-define void @st_local_i16(i16 addrspace(5)* %ptr, i16 %a) {
+define void @st_local_i16(ptr addrspace(5) %ptr, i16 %a) {
 ; LS32: st.local.u16 [%r{{[0-9]+}}], %rs{{[0-9]+}}
 ; LS64: st.local.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
 ; ALL: ret
-  store i16 %a, i16 addrspace(5)* %ptr
+  store i16 %a, ptr addrspace(5) %ptr
   ret void
 }
 
 ;; i32
 ; ALL-LABEL: st_global_i32
-define void @st_global_i32(i32 addrspace(1)* %ptr, i32 %a) {
+define void @st_global_i32(ptr addrspace(1) %ptr, i32 %a) {
 ; G32: st.global.u32 [%r{{[0-9]+}}], %r{{[0-9]+}}
 ; G64: st.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
 ; ALL: ret
-  store i32 %a, i32 addrspace(1)* %ptr
+  store i32 %a, ptr addrspace(1) %ptr
   ret void
 }
 ; ALL-LABEL: st_shared_i32
-define void @st_shared_i32(i32 addrspace(3)* %ptr, i32 %a) {
+define void @st_shared_i32(ptr addrspace(3) %ptr, i32 %a) {
 ; LS32: st.shared.u32 [%r{{[0-9]+}}], %r{{[0-9]+}}
 ; LS64: st.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
 ; PTX64: ret
-  store i32 %a, i32 addrspace(3)* %ptr
+  store i32 %a, ptr addrspace(3) %ptr
   ret void
 }
 ; ALL-LABEL: st_local_i32
-define void @st_local_i32(i32 addrspace(5)* %ptr, i32 %a) {
+define void @st_local_i32(ptr addrspace(5) %ptr, i32 %a) {
 ; LS32: st.local.u32 [%r{{[0-9]+}}], %r{{[0-9]+}}
 ; LS64: st.local.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
 ; ALL: ret
-  store i32 %a, i32 addrspace(5)* %ptr
+  store i32 %a, ptr addrspace(5) %ptr
   ret void
 }
 
 ;; i64
 ; ALL-LABEL: st_global_i64
-define void @st_global_i64(i64 addrspace(1)* %ptr, i64 %a) {
+define void @st_global_i64(ptr addrspace(1) %ptr, i64 %a) {
 ; G32: st.global.u64 [%r{{[0-9]+}}], %rd{{[0-9]+}}
 ; G64: st.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
 ; ALL: ret
-  store i64 %a, i64 addrspace(1)* %ptr
+  store i64 %a, ptr addrspace(1) %ptr
   ret void
 }
 ; ALL-LABEL: st_shared_i64
-define void @st_shared_i64(i64 addrspace(3)* %ptr, i64 %a) {
+define void @st_shared_i64(ptr addrspace(3) %ptr, i64 %a) {
 ; LS32: st.shared.u64 [%r{{[0-9]+}}], %rd{{[0-9]+}}
 ; LS64: st.shared.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
 ; ALL: ret
-  store i64 %a, i64 addrspace(3)* %ptr
+  store i64 %a, ptr addrspace(3) %ptr
   ret void
 }
 ; ALL-LABEL: st_local_i64
-define void @st_local_i64(i64 addrspace(5)* %ptr, i64 %a) {
+define void @st_local_i64(ptr addrspace(5) %ptr, i64 %a) {
 ; LS32: st.local.u64 [%r{{[0-9]+}}], %rd{{[0-9]+}}
 ; LS64: st.local.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
 ; ALL: ret
-  store i64 %a, i64 addrspace(5)* %ptr
+  store i64 %a, ptr addrspace(5) %ptr
   ret void
 }
 
 ;; f32
 ; ALL-LABEL: st_global_f32
-define void @st_global_f32(float addrspace(1)* %ptr, float %a) {
+define void @st_global_f32(ptr addrspace(1) %ptr, float %a) {
 ; G32: st.global.f32 [%r{{[0-9]+}}], %f{{[0-9]+}}
 ; G64: st.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
 ; ALL: ret
-  store float %a, float addrspace(1)* %ptr
+  store float %a, ptr addrspace(1) %ptr
   ret void
 }
 ; ALL-LABEL: st_shared_f32
-define void @st_shared_f32(float addrspace(3)* %ptr, float %a) {
+define void @st_shared_f32(ptr addrspace(3) %ptr, float %a) {
 ; LS32: st.shared.f32 [%r{{[0-9]+}}], %f{{[0-9]+}}
 ; LS64: st.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
 ; ALL: ret
-  store float %a, float addrspace(3)* %ptr
+  store float %a, ptr addrspace(3) %ptr
   ret void
 }
 ; ALL-LABEL: st_local_f32
-define void @st_local_f32(float addrspace(5)* %ptr, float %a) {
+define void @st_local_f32(ptr addrspace(5) %ptr, float %a) {
 ; LS32: st.local.f32 [%r{{[0-9]+}}], %f{{[0-9]+}}
 ; LS64: st.local.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
 ; ALL: ret
-  store float %a, float addrspace(5)* %ptr
+  store float %a, ptr addrspace(5) %ptr
   ret void
 }
 
 ;; f64
 ; ALL-LABEL: st_global_f64
-define void @st_global_f64(double addrspace(1)* %ptr, double %a) {
+define void @st_global_f64(ptr addrspace(1) %ptr, double %a) {
 ; G32: st.global.f64 [%r{{[0-9]+}}], %fd{{[0-9]+}}
 ; G64: st.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
 ; ALL: ret
-  store double %a, double addrspace(1)* %ptr
+  store double %a, ptr addrspace(1) %ptr
   ret void
 }
 ; ALL-LABEL: st_shared_f64
-define void @st_shared_f64(double addrspace(3)* %ptr, double %a) {
+define void @st_shared_f64(ptr addrspace(3) %ptr, double %a) {
 ; LS32: st.shared.f64 [%r{{[0-9]+}}], %fd{{[0-9]+}}
 ; LS64: st.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
 ; ALL: ret
-  store double %a, double addrspace(3)* %ptr
+  store double %a, ptr addrspace(3) %ptr
   ret void
 }
 ; ALL-LABEL: st_local_f64
-define void @st_local_f64(double addrspace(5)* %ptr, double %a) {
+define void @st_local_f64(ptr addrspace(5) %ptr, double %a) {
 ; LS32: st.local.f64 [%r{{[0-9]+}}], %fd{{[0-9]+}}
 ; LS64: st.local.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
 ; ALL: ret
-  store double %a, double addrspace(5)* %ptr
+  store double %a, ptr addrspace(5) %ptr
   ret void
 }

diff --git a/llvm/test/CodeGen/PowerPC/2007-11-04-CoalescerCrash.ll b/llvm/test/CodeGen/PowerPC/2007-11-04-CoalescerCrash.ll
index 460c1338d3424..ba698b0735358 100644
--- a/llvm/test/CodeGen/PowerPC/2007-11-04-CoalescerCrash.ll
+++ b/llvm/test/CodeGen/PowerPC/2007-11-04-CoalescerCrash.ll
@@ -2,9 +2,9 @@
 
 	%struct.HDescriptor = type <{ i32, i32 }>
 
-declare void @bcopy(i8*, i8*, i32)
+declare void @bcopy(ptr, ptr, i32)
 
-define i32 @main(i32 %argc, i8** %argv) {
+define i32 @main(i32 %argc, ptr %argv) {
 entry:
 	br i1 false, label %bb31, label %bb
 
@@ -86,7 +86,7 @@ free.i:		; preds = %cond_next21.i
 
 cond_next934:		; preds = %bb1005, %cond_next21.i
 	%listsize.1 = phi i32 [ 0, %bb1005 ], [ 64, %cond_next21.i ]		; <i32> [#uses=1]
-	%catalogExtents.2 = phi %struct.HDescriptor* [ %catalogExtents.1.reg2mem.1, %bb1005 ], [ null, %cond_next21.i ]		; <%struct.HDescriptor*> [#uses=3]
+	%catalogExtents.2 = phi ptr [ %catalogExtents.1.reg2mem.1, %bb1005 ], [ null, %cond_next21.i ]		; <%struct.HDescriptor*> [#uses=3]
 	br i1 false, label %cond_next942, label %Return1020
 
 cond_next942:		; preds = %cond_next934
@@ -94,27 +94,27 @@ cond_next942:		; preds = %cond_next934
 
 bb947:		; preds = %cond_next971, %cond_next942
 	%indvar = phi i32 [ 0, %cond_next942 ], [ %indvar.next2140, %cond_next971 ]		; <i32> [#uses=2]
-	%catalogExtents.1.reg2mem.0 = phi %struct.HDescriptor* [ %catalogExtents.2, %cond_next942 ], [ %tmp977978, %cond_next971 ]		; <%struct.HDescriptor*> [#uses=1]
-	%extents.0.reg2mem.0 = phi %struct.HDescriptor* [ null, %cond_next942 ], [ %tmp977978, %cond_next971 ]		; <%struct.HDescriptor*> [#uses=1]
+	%catalogExtents.1.reg2mem.0 = phi ptr [ %catalogExtents.2, %cond_next942 ], [ %tmp977978, %cond_next971 ]		; <%struct.HDescriptor*> [#uses=1]
+	%extents.0.reg2mem.0 = phi ptr [ null, %cond_next942 ], [ %tmp977978, %cond_next971 ]		; <%struct.HDescriptor*> [#uses=1]
 	br i1 false, label %cond_next971, label %Return1020
 
 cond_next971:		; preds = %bb947
 	%tmp = shl i32 %indvar, 6		; <i32> [#uses=1]
 	%listsize.0.reg2mem.0 = add i32 %tmp, %listsize.1		; <i32> [#uses=1]
 	%tmp973 = add i32 %listsize.0.reg2mem.0, 64		; <i32> [#uses=1]
-	%tmp974975 = bitcast %struct.HDescriptor* %extents.0.reg2mem.0 to i8*		; <i8*> [#uses=1]
-	%tmp977 = call i8* @realloc( i8* %tmp974975, i32 %tmp973 )		; <i8*> [#uses=1]
-	%tmp977978 = bitcast i8* %tmp977 to %struct.HDescriptor*		; <%struct.HDescriptor*> [#uses=3]
-	call void @bcopy( i8* null, i8* null, i32 64 )
+	%tmp974975 = bitcast ptr %extents.0.reg2mem.0 to ptr		; <i8*> [#uses=1]
+	%tmp977 = call ptr @realloc( ptr %tmp974975, i32 %tmp973 )		; <i8*> [#uses=1]
+	%tmp977978 = bitcast ptr %tmp977 to ptr		; <%struct.HDescriptor*> [#uses=3]
+	call void @bcopy( ptr null, ptr null, i32 64 )
 	%indvar.next2140 = add i32 %indvar, 1		; <i32> [#uses=1]
 	br i1 false, label %bb1005, label %bb947
 
 bb1005:		; preds = %cond_next971, %cond_next942
-	%catalogExtents.1.reg2mem.1 = phi %struct.HDescriptor* [ %catalogExtents.2, %cond_next942 ], [ %tmp977978, %cond_next971 ]		; <%struct.HDescriptor*> [#uses=2]
+	%catalogExtents.1.reg2mem.1 = phi ptr [ %catalogExtents.2, %cond_next942 ], [ %tmp977978, %cond_next971 ]		; <%struct.HDescriptor*> [#uses=2]
 	br i1 false, label %Return1020, label %cond_next934
 
 Return1020:		; preds = %bb1005, %bb947, %cond_next934
-	%catalogExtents.3 = phi %struct.HDescriptor* [ %catalogExtents.1.reg2mem.0, %bb947 ], [ %catalogExtents.2, %cond_next934 ], [ %catalogExtents.1.reg2mem.1, %bb1005 ]		; <%struct.HDescriptor*> [#uses=0]
+	%catalogExtents.3 = phi ptr [ %catalogExtents.1.reg2mem.0, %bb947 ], [ %catalogExtents.2, %cond_next934 ], [ %catalogExtents.1.reg2mem.1, %bb1005 ]		; <%struct.HDescriptor*> [#uses=0]
 	ret i32 0
 
 cond_true1192:		; preds = %cond_next807
@@ -145,4 +145,4 @@ AllDone:		; preds = %cond_next372, %cond_true48
 	ret i32 0
 }
 
-declare i8* @realloc(i8*, i32)
+declare ptr @realloc(ptr, i32)

diff --git a/llvm/test/CodeGen/PowerPC/aix-alias-alignment-2.ll b/llvm/test/CodeGen/PowerPC/aix-alias-alignment-2.ll
index 209f0e9c38385..b8299148cd8d6 100644
--- a/llvm/test/CodeGen/PowerPC/aix-alias-alignment-2.ll
+++ b/llvm/test/CodeGen/PowerPC/aix-alias-alignment-2.ll
@@ -6,9 +6,9 @@
 ; RUN: llvm-objdump --syms %t.o | FileCheck --check-prefix=SYM %s
 
 @ConstVector = global <2 x i64> <i64 12, i64 34>, align 4
-@var1 = alias i64, getelementptr inbounds (<2 x i64>, <2 x i64>* @ConstVector, i32 0, i32 1)
+@var1 = alias i64, getelementptr inbounds (<2 x i64>, ptr @ConstVector, i32 0, i32 1)
 define void @foo1(i64 %a1) {
-  store i64 %a1, i64* getelementptr inbounds (<2 x i64>, <2 x i64>* @ConstVector, i32 0, i32 1), align 4
+  store i64 %a1, ptr getelementptr inbounds (<2 x i64>, ptr @ConstVector, i32 0, i32 1), align 4
   ret void
 }
 
@@ -23,9 +23,9 @@ define void @foo1(i64 %a1) {
 ; ASM-NEXT:      .vbyte     4, 34
 
 @ConstDataSeq = global [2 x i64] [i64 12, i64 34], align 4
-@var2 = alias i64, getelementptr inbounds ([2 x i64], [2 x i64]* @ConstDataSeq, i32 0, i32 1)
+@var2 = alias i64, getelementptr inbounds ([2 x i64], ptr @ConstDataSeq, i32 0, i32 1)
 define void @foo2(i64 %a1) {
-  store i64 %a1, i64* getelementptr inbounds ([2 x i64], [2 x i64]* @ConstDataSeq, i32 0, i32 1), align 4
+  store i64 %a1, ptr getelementptr inbounds ([2 x i64], ptr @ConstDataSeq, i32 0, i32 1), align 4
   ret void
 }
 
@@ -41,9 +41,9 @@ define void @foo2(i64 %a1) {
 
 %struct.B = type { i64 }
 @ConstArray = global [2 x %struct.B] [%struct.B {i64 12}, %struct.B {i64 34}], align 4
-@var3 = alias %struct.B, getelementptr inbounds ([2 x %struct.B], [2 x %struct.B]* @ConstArray, i32 0, i32 0)
+@var3 = alias %struct.B, ptr @ConstArray
 define void @foo3(%struct.B %a1) {
-  store %struct.B %a1, %struct.B* getelementptr inbounds ([2 x %struct.B], [2 x %struct.B]* @ConstArray, i32 0, i32 1), align 4
+  store %struct.B %a1, ptr getelementptr inbounds ([2 x %struct.B], ptr @ConstArray, i32 0, i32 1), align 4
   ret void
 }
 

diff --git a/llvm/test/CodeGen/PowerPC/aix-alias-alignment.ll b/llvm/test/CodeGen/PowerPC/aix-alias-alignment.ll
index a1ad80481adf1..8bfc5139aa747 100644
--- a/llvm/test/CodeGen/PowerPC/aix-alias-alignment.ll
+++ b/llvm/test/CodeGen/PowerPC/aix-alias-alignment.ll
@@ -10,12 +10,12 @@
 ; RUN: llvm-objdump --syms %t.o | FileCheck --check-prefix=SYM %s
 
 @_MergedGlobals = global <{ i32, i32 }> <{ i32 1, i32 2 }>, align 4
-@var1 = alias i32, getelementptr inbounds (<{ i32, i32 }>, ptr @_MergedGlobals, i32 0, i32 0)
+@var1 = alias i32, ptr @_MergedGlobals
 @var2 = alias i32, getelementptr inbounds (<{ i32, i32 }>, ptr @_MergedGlobals, i32 0, i32 1)
 @var3 = alias i32, ptr @var2
 
 define void @foo(i32 %a1, i32 %a2, i32 %a3) {
-  store i32 %a1, ptr getelementptr inbounds (<{ i32, i32 }>, ptr @_MergedGlobals, i32 0, i32 0), align 4
+  store i32 %a1, ptr @_MergedGlobals, align 4
   store i32 %a2, ptr getelementptr inbounds (<{ i32, i32 }>, ptr @_MergedGlobals, i32 0, i32 1), align 4
   ret void
 }

diff --git a/llvm/test/CodeGen/PowerPC/aix-complex.ll b/llvm/test/CodeGen/PowerPC/aix-complex.ll
index 0323a4713ab98..f2114540a57e4 100644
--- a/llvm/test/CodeGen/PowerPC/aix-complex.ll
+++ b/llvm/test/CodeGen/PowerPC/aix-complex.ll
@@ -31,7 +31,7 @@ entry:
   %call = call { double, double } @dblCmplxRetCallee()
   %0 = extractvalue { double, double } %call, 0
   %1 = extractvalue { double, double } %call, 1
-  store double %0, ptr getelementptr inbounds ({ double, double }, ptr @gcd, i32 0, i32 0), align 8
+  store double %0, ptr @gcd, align 8
   store double %1, ptr getelementptr inbounds ({ double, double }, ptr @gcd, i32 0, i32 1), align 8
   call void @anchor()
   ret void
@@ -68,7 +68,7 @@ entry:
   %call = call { float, float } @fltCmplxRetCallee()
   %0 = extractvalue { float, float } %call, 0
   %1 = extractvalue { float, float } %call, 1
-  store float %0, ptr getelementptr inbounds ({ float, float }, ptr @gcf, i32 0, i32 0), align 4
+  store float %0, ptr @gcf, align 4
   store float %1, ptr getelementptr inbounds ({ float, float }, ptr @gcf, i32 0, i32 1), align 4
   call void @anchor()
   ret void
@@ -107,7 +107,7 @@ entry:
   %call = call { ppc_fp128, ppc_fp128 } @fp128CmplxRetCallee()
   %0 = extractvalue { ppc_fp128, ppc_fp128 } %call, 0
   %1 = extractvalue { ppc_fp128, ppc_fp128 } %call, 1
-  store ppc_fp128 %0, ptr getelementptr inbounds ({ ppc_fp128, ppc_fp128 }, ptr @gcfp128, i32 0, i32 0), align 16
+  store ppc_fp128 %0, ptr @gcfp128, align 16
   store ppc_fp128 %1, ptr getelementptr inbounds ({ ppc_fp128, ppc_fp128 }, ptr @gcfp128, i32 0, i32 1), align 16
   call void @anchor()
   ret void

diff --git a/llvm/test/CodeGen/PowerPC/aix-tls-gd-target-flags.ll b/llvm/test/CodeGen/PowerPC/aix-tls-gd-target-flags.ll
index f55932c3088de..149fa9d00923d 100644
--- a/llvm/test/CodeGen/PowerPC/aix-tls-gd-target-flags.ll
+++ b/llvm/test/CodeGen/PowerPC/aix-tls-gd-target-flags.ll
@@ -21,8 +21,8 @@ define signext i32 @foo() {
   ; CHECK-NEXT:   $x3 = COPY [[EXTSW_32_64_]]
   ; CHECK-NEXT:   BLR8 implicit $lr8, implicit $rm, implicit $x3
 entry:
-  %0 = load i32, i32* @a, align 4
-  %1 = load i32, i32* @b, align 4
+  %0 = load i32, ptr @a, align 4
+  %1 = load i32, ptr @b, align 4
   %add = add nsw i32 %1, %0
   ret i32 %add
 }

diff --git a/llvm/test/CodeGen/PowerPC/block-placement.mir b/llvm/test/CodeGen/PowerPC/block-placement.mir
index fa32064ffc65d..dab8dfbb7c37c 100644
--- a/llvm/test/CodeGen/PowerPC/block-placement.mir
+++ b/llvm/test/CodeGen/PowerPC/block-placement.mir
@@ -7,34 +7,34 @@
   target triple = "powerpc64le-unknown-linux-gnu"
 
   %"class.xercesc_2_7::HashXMLCh" = type { %"class.xercesc_2_7::HashBase" }
-  %"class.xercesc_2_7::HashBase" = type { i32 (...)** }
+  %"class.xercesc_2_7::HashBase" = type { ptr }
 
-  define dso_local zeroext i1 @_ZN11xercesc_2_79HashXMLCh6equalsEPKvS2_(%"class.xercesc_2_7::HashXMLCh"* nocapture readnone %this, i8* readonly %key1, i8* readonly %key2) unnamed_addr #0 {
+  define dso_local zeroext i1 @_ZN11xercesc_2_79HashXMLCh6equalsEPKvS2_(ptr nocapture readnone %this, ptr readonly %key1, ptr readonly %key2) unnamed_addr #0 {
   entry:
-    %cmp.i = icmp eq i8* %key1, null
-    %cmp1.i = icmp eq i8* %key2, null
+    %cmp.i = icmp eq ptr %key1, null
+    %cmp1.i = icmp eq ptr %key2, null
     %or.cond.i = or i1 %cmp.i, %cmp1.i
     br i1 %or.cond.i, label %if.then.i, label %while.cond.preheader.i
 
   while.cond.preheader.i:                           ; preds = %entry
-    %0 = bitcast i8* %key2 to i16*
-    %1 = bitcast i8* %key1 to i16*
-    %2 = load i16, i16* %1, align 2
-    %3 = load i16, i16* %0, align 2
+    %0 = bitcast ptr %key2 to ptr
+    %1 = bitcast ptr %key1 to ptr
+    %2 = load i16, ptr %1, align 2
+    %3 = load i16, ptr %0, align 2
     %cmp926.i = icmp eq i16 %2, %3
     br i1 %cmp926.i, label %while.body.i.preheader, label %_ZN11xercesc_2_79XMLString6equalsEPKtS2_.exit
 
   while.body.i.preheader:                           ; preds = %while.cond.preheader.i
-    %scevgep = getelementptr i8, i8* %key2, i64 2
-    %scevgep4 = getelementptr i8, i8* %key1, i64 2
+    %scevgep = getelementptr i8, ptr %key2, i64 2
+    %scevgep4 = getelementptr i8, ptr %key1, i64 2
     br label %while.body.i
 
   if.then.i:                                        ; preds = %entry
     br i1 %cmp.i, label %lor.lhs.false3.i, label %land.lhs.true.i
 
   land.lhs.true.i:                                  ; preds = %if.then.i
-    %4 = bitcast i8* %key1 to i16*
-    %5 = load i16, i16* %4, align 2
+    %4 = bitcast ptr %key1 to ptr
+    %5 = load i16, ptr %4, align 2
     %tobool.i = icmp eq i16 %5, 0
     br i1 %tobool.i, label %lor.lhs.false3.i, label %_ZN11xercesc_2_79XMLString6equalsEPKtS2_.exit
 
@@ -42,8 +42,8 @@
     br i1 %cmp1.i, label %if.else.i, label %land.lhs.true5.i
 
   land.lhs.true5.i:                                 ; preds = %lor.lhs.false3.i
-    %6 = bitcast i8* %key2 to i16*
-    %7 = load i16, i16* %6, align 2
+    %6 = bitcast ptr %key2 to ptr
+    %7 = load i16, ptr %6, align 2
     %tobool6.i = icmp eq i16 %7, 0
     br i1 %tobool6.i, label %if.else.i, label %_ZN11xercesc_2_79XMLString6equalsEPKtS2_.exit
 
@@ -51,24 +51,24 @@
     br label %_ZN11xercesc_2_79XMLString6equalsEPKtS2_.exit
 
   while.body.i:                                     ; preds = %while.body.i.preheader, %if.end12.i
-    %lsr.iv5 = phi i8* [ %scevgep4, %while.body.i.preheader ], [ %scevgep6, %if.end12.i ]
-    %lsr.iv = phi i8* [ %scevgep, %while.body.i.preheader ], [ %scevgep2, %if.end12.i ]
+    %lsr.iv5 = phi ptr [ %scevgep4, %while.body.i.preheader ], [ %scevgep6, %if.end12.i ]
+    %lsr.iv = phi ptr [ %scevgep, %while.body.i.preheader ], [ %scevgep2, %if.end12.i ]
     %8 = phi i16 [ %15, %if.end12.i ], [ %2, %while.body.i.preheader ]
-    %9 = phi i8* [ %key1, %while.body.i.preheader ], [ %13, %if.end12.i ]
-    %10 = phi i8* [ %key2, %while.body.i.preheader ], [ %11, %if.end12.i ]
-    %11 = getelementptr i8, i8* %10, i64 2
-    %12 = bitcast i8* %11 to i16*
-    %13 = getelementptr i8, i8* %9, i64 2
-    %14 = bitcast i8* %13 to i16*
+    %9 = phi ptr [ %key1, %while.body.i.preheader ], [ %13, %if.end12.i ]
+    %10 = phi ptr [ %key2, %while.body.i.preheader ], [ %11, %if.end12.i ]
+    %11 = getelementptr i8, ptr %10, i64 2
+    %12 = bitcast ptr %11 to ptr
+    %13 = getelementptr i8, ptr %9, i64 2
+    %14 = bitcast ptr %13 to ptr
     %tobool10.i = icmp eq i16 %8, 0
     br i1 %tobool10.i, label %_ZN11xercesc_2_79XMLString6equalsEPKtS2_.exit, label %if.end12.i
 
   if.end12.i:                                       ; preds = %while.body.i
-    %15 = load i16, i16* %14, align 2
-    %16 = load i16, i16* %12, align 2
+    %15 = load i16, ptr %14, align 2
+    %16 = load i16, ptr %12, align 2
     %cmp9.i = icmp eq i16 %15, %16
-    %scevgep2 = getelementptr i8, i8* %lsr.iv, i64 2
-    %scevgep6 = getelementptr i8, i8* %lsr.iv5, i64 2
+    %scevgep2 = getelementptr i8, ptr %lsr.iv, i64 2
+    %scevgep6 = getelementptr i8, ptr %lsr.iv5, i64 2
     br i1 %cmp9.i, label %while.body.i, label %_ZN11xercesc_2_79XMLString6equalsEPKtS2_.exit
 
   _ZN11xercesc_2_79XMLString6equalsEPKtS2_.exit:    ; preds = %if.end12.i, %while.body.i, %if.else.i, %land.lhs.true5.i, %land.lhs.true.i, %while.cond.preheader.i

diff --git a/llvm/test/CodeGen/PowerPC/convert-rr-to-ri-instrs-R0-special-handling.mir b/llvm/test/CodeGen/PowerPC/convert-rr-to-ri-instrs-R0-special-handling.mir
index fe2b392f59da2..e1d028548803f 100644
--- a/llvm/test/CodeGen/PowerPC/convert-rr-to-ri-instrs-R0-special-handling.mir
+++ b/llvm/test/CodeGen/PowerPC/convert-rr-to-ri-instrs-R0-special-handling.mir
@@ -34,32 +34,32 @@
   }
   
   ; Function Attrs: norecurse nounwind readonly
-  define i64 @unsafeLDXR3R0(i64* nocapture readonly %ptr, i64 %off) local_unnamed_addr #1 {
+  define i64 @unsafeLDXR3R0(ptr nocapture readonly %ptr, i64 %off) local_unnamed_addr #1 {
   entry:
-    %0 = bitcast i64* %ptr to i8*
-    %add.ptr = getelementptr inbounds i8, i8* %0, i64 %off
-    %1 = bitcast i8* %add.ptr to i64*
-    %2 = load i64, i64* %1, align 8, !tbaa !3
+    %0 = bitcast ptr %ptr to ptr
+    %add.ptr = getelementptr inbounds i8, ptr %0, i64 %off
+    %1 = bitcast ptr %add.ptr to ptr
+    %2 = load i64, ptr %1, align 8, !tbaa !3
     ret i64 %2
   }
   
   ; Function Attrs: norecurse nounwind readonly
-  define i64 @safeLDXZeroR3(i64* nocapture readonly %ptr, i64 %off) local_unnamed_addr #1 {
+  define i64 @safeLDXZeroR3(ptr nocapture readonly %ptr, i64 %off) local_unnamed_addr #1 {
   entry:
-    %0 = bitcast i64* %ptr to i8*
-    %add.ptr = getelementptr inbounds i8, i8* %0, i64 %off
-    %1 = bitcast i8* %add.ptr to i64*
-    %2 = load i64, i64* %1, align 8, !tbaa !3
+    %0 = bitcast ptr %ptr to ptr
+    %add.ptr = getelementptr inbounds i8, ptr %0, i64 %off
+    %1 = bitcast ptr %add.ptr to ptr
+    %2 = load i64, ptr %1, align 8, !tbaa !3
     ret i64 %2
   }
   
   ; Function Attrs: norecurse nounwind readonly
-  define i64 @safeLDXR3R0(i64* nocapture readonly %ptr, i64 %off) local_unnamed_addr #1 {
+  define i64 @safeLDXR3R0(ptr nocapture readonly %ptr, i64 %off) local_unnamed_addr #1 {
   entry:
-    %0 = bitcast i64* %ptr to i8*
-    %add.ptr = getelementptr inbounds i8, i8* %0, i64 %off
-    %1 = bitcast i8* %add.ptr to i64*
-    %2 = load i64, i64* %1, align 8, !tbaa !3
+    %0 = bitcast ptr %ptr to ptr
+    %add.ptr = getelementptr inbounds i8, ptr %0, i64 %off
+    %1 = bitcast ptr %add.ptr to ptr
+    %2 = load i64, ptr %1, align 8, !tbaa !3
     ret i64 %2
   }
   

diff --git a/llvm/test/CodeGen/PowerPC/convert-rr-to-ri-instrs.mir b/llvm/test/CodeGen/PowerPC/convert-rr-to-ri-instrs.mir
index b62616ddead75..761316ed7726d 100644
--- a/llvm/test/CodeGen/PowerPC/convert-rr-to-ri-instrs.mir
+++ b/llvm/test/CodeGen/PowerPC/convert-rr-to-ri-instrs.mir
@@ -162,17 +162,17 @@
   }
 
   ; Function Attrs: norecurse nounwind readonly
-  define zeroext i8 @testLBZUX(i8* nocapture readonly %ptr, i32 zeroext %idx) local_unnamed_addr #1 {
+  define zeroext i8 @testLBZUX(ptr nocapture readonly %ptr, i32 zeroext %idx) local_unnamed_addr #1 {
   entry:
     %add = add i32 %idx, 1
     %idxprom = zext i32 %add to i64
-    %arrayidx = getelementptr inbounds i8, i8* %ptr, i64 %idxprom
-    %0 = load i8, i8* %arrayidx, align 1, !tbaa !3
+    %arrayidx = getelementptr inbounds i8, ptr %ptr, i64 %idxprom
+    %0 = load i8, ptr %arrayidx, align 1, !tbaa !3
     %conv = zext i8 %0 to i32
     %add1 = add i32 %idx, 2
     %idxprom2 = zext i32 %add1 to i64
-    %arrayidx3 = getelementptr inbounds i8, i8* %ptr, i64 %idxprom2
-    %1 = load i8, i8* %arrayidx3, align 1, !tbaa !3
+    %arrayidx3 = getelementptr inbounds i8, ptr %ptr, i64 %idxprom2
+    %1 = load i8, ptr %arrayidx3, align 1, !tbaa !3
     %conv4 = zext i8 %1 to i32
     %add5 = add nuw nsw i32 %conv4, %conv
     %conv6 = trunc i32 %add5 to i8
@@ -180,17 +180,17 @@
   }
 
   ; Function Attrs: norecurse nounwind readonly
-  define zeroext i8 @testLBZX(i8* nocapture readonly %ptr, i32 zeroext %idx) local_unnamed_addr #1 {
+  define zeroext i8 @testLBZX(ptr nocapture readonly %ptr, i32 zeroext %idx) local_unnamed_addr #1 {
   entry:
     %add = add i32 %idx, 1
     %idxprom = zext i32 %add to i64
-    %arrayidx = getelementptr inbounds i8, i8* %ptr, i64 %idxprom
-    %0 = load i8, i8* %arrayidx, align 1, !tbaa !3
+    %arrayidx = getelementptr inbounds i8, ptr %ptr, i64 %idxprom
+    %0 = load i8, ptr %arrayidx, align 1, !tbaa !3
     %conv = zext i8 %0 to i32
     %add1 = add i32 %idx, 2
     %idxprom2 = zext i32 %add1 to i64
-    %arrayidx3 = getelementptr inbounds i8, i8* %ptr, i64 %idxprom2
-    %1 = load i8, i8* %arrayidx3, align 1, !tbaa !3
+    %arrayidx3 = getelementptr inbounds i8, ptr %ptr, i64 %idxprom2
+    %1 = load i8, ptr %arrayidx3, align 1, !tbaa !3
     %conv4 = zext i8 %1 to i32
     %add5 = add nuw nsw i32 %conv4, %conv
     %conv6 = trunc i32 %add5 to i8
@@ -198,17 +198,17 @@
   }
 
   ; Function Attrs: norecurse nounwind readonly
-  define zeroext i16 @testLHZUX(i16* nocapture readonly %ptr, i32 zeroext %idx) local_unnamed_addr #1 {
+  define zeroext i16 @testLHZUX(ptr nocapture readonly %ptr, i32 zeroext %idx) local_unnamed_addr #1 {
   entry:
     %add = add i32 %idx, 1
     %idxprom = zext i32 %add to i64
-    %arrayidx = getelementptr inbounds i16, i16* %ptr, i64 %idxprom
-    %0 = load i16, i16* %arrayidx, align 2, !tbaa !6
+    %arrayidx = getelementptr inbounds i16, ptr %ptr, i64 %idxprom
+    %0 = load i16, ptr %arrayidx, align 2, !tbaa !6
     %conv = zext i16 %0 to i32
     %add1 = add i32 %idx, 2
     %idxprom2 = zext i32 %add1 to i64
-    %arrayidx3 = getelementptr inbounds i16, i16* %ptr, i64 %idxprom2
-    %1 = load i16, i16* %arrayidx3, align 2, !tbaa !6
+    %arrayidx3 = getelementptr inbounds i16, ptr %ptr, i64 %idxprom2
+    %1 = load i16, ptr %arrayidx3, align 2, !tbaa !6
     %conv4 = zext i16 %1 to i32
     %add5 = add nuw nsw i32 %conv4, %conv
     %conv6 = trunc i32 %add5 to i16
@@ -216,17 +216,17 @@
   }
 
   ; Function Attrs: norecurse nounwind readonly
-  define zeroext i16 @testLHZX(i16* nocapture readonly %ptr, i32 zeroext %idx) local_unnamed_addr #1 {
+  define zeroext i16 @testLHZX(ptr nocapture readonly %ptr, i32 zeroext %idx) local_unnamed_addr #1 {
   entry:
     %add = add i32 %idx, 1
     %idxprom = zext i32 %add to i64
-    %arrayidx = getelementptr inbounds i16, i16* %ptr, i64 %idxprom
-    %0 = load i16, i16* %arrayidx, align 2, !tbaa !6
+    %arrayidx = getelementptr inbounds i16, ptr %ptr, i64 %idxprom
+    %0 = load i16, ptr %arrayidx, align 2, !tbaa !6
     %conv = zext i16 %0 to i32
     %add1 = add i32 %idx, 2
     %idxprom2 = zext i32 %add1 to i64
-    %arrayidx3 = getelementptr inbounds i16, i16* %ptr, i64 %idxprom2
-    %1 = load i16, i16* %arrayidx3, align 2, !tbaa !6
+    %arrayidx3 = getelementptr inbounds i16, ptr %ptr, i64 %idxprom2
+    %1 = load i16, ptr %arrayidx3, align 2, !tbaa !6
     %conv4 = zext i16 %1 to i32
     %add5 = add nuw nsw i32 %conv4, %conv
     %conv6 = trunc i32 %add5 to i16
@@ -234,17 +234,17 @@
   }
 
   ; Function Attrs: norecurse nounwind readonly
-  define signext i16 @testLHAUX(i16* nocapture readonly %ptr, i32 zeroext %idx) local_unnamed_addr #1 {
+  define signext i16 @testLHAUX(ptr nocapture readonly %ptr, i32 zeroext %idx) local_unnamed_addr #1 {
   entry:
     %add = add i32 %idx, 1
     %idxprom = zext i32 %add to i64
-    %arrayidx = getelementptr inbounds i16, i16* %ptr, i64 %idxprom
-    %0 = load i16, i16* %arrayidx, align 2, !tbaa !6
+    %arrayidx = getelementptr inbounds i16, ptr %ptr, i64 %idxprom
+    %0 = load i16, ptr %arrayidx, align 2, !tbaa !6
     %conv9 = zext i16 %0 to i32
     %add1 = add i32 %idx, 2
     %idxprom2 = zext i32 %add1 to i64
-    %arrayidx3 = getelementptr inbounds i16, i16* %ptr, i64 %idxprom2
-    %1 = load i16, i16* %arrayidx3, align 2, !tbaa !6
+    %arrayidx3 = getelementptr inbounds i16, ptr %ptr, i64 %idxprom2
+    %1 = load i16, ptr %arrayidx3, align 2, !tbaa !6
     %conv410 = zext i16 %1 to i32
     %add5 = add nuw nsw i32 %conv410, %conv9
     %conv6 = trunc i32 %add5 to i16
@@ -252,17 +252,17 @@
   }
 
   ; Function Attrs: norecurse nounwind readonly
-  define signext i16 @testLHAX(i16* nocapture readonly %ptr, i32 zeroext %idx) local_unnamed_addr #1 {
+  define signext i16 @testLHAX(ptr nocapture readonly %ptr, i32 zeroext %idx) local_unnamed_addr #1 {
   entry:
     %add = add i32 %idx, 1
     %idxprom = zext i32 %add to i64
-    %arrayidx = getelementptr inbounds i16, i16* %ptr, i64 %idxprom
-    %0 = load i16, i16* %arrayidx, align 2, !tbaa !6
+    %arrayidx = getelementptr inbounds i16, ptr %ptr, i64 %idxprom
+    %0 = load i16, ptr %arrayidx, align 2, !tbaa !6
     %conv9 = zext i16 %0 to i32
     %add1 = add i32 %idx, 2
     %idxprom2 = zext i32 %add1 to i64
-    %arrayidx3 = getelementptr inbounds i16, i16* %ptr, i64 %idxprom2
-    %1 = load i16, i16* %arrayidx3, align 2, !tbaa !6
+    %arrayidx3 = getelementptr inbounds i16, ptr %ptr, i64 %idxprom2
+    %1 = load i16, ptr %arrayidx3, align 2, !tbaa !6
     %conv410 = zext i16 %1 to i32
     %add5 = add nuw nsw i32 %conv410, %conv9
     %conv6 = trunc i32 %add5 to i16
@@ -270,136 +270,136 @@
   }
 
   ; Function Attrs: norecurse nounwind readonly
-  define zeroext i32 @testLWZUX(i32* nocapture readonly %ptr, i32 zeroext %idx) local_unnamed_addr #1 {
+  define zeroext i32 @testLWZUX(ptr nocapture readonly %ptr, i32 zeroext %idx) local_unnamed_addr #1 {
   entry:
     %add = add i32 %idx, 1
     %idxprom = zext i32 %add to i64
-    %arrayidx = getelementptr inbounds i32, i32* %ptr, i64 %idxprom
-    %0 = load i32, i32* %arrayidx, align 4, !tbaa !8
+    %arrayidx = getelementptr inbounds i32, ptr %ptr, i64 %idxprom
+    %0 = load i32, ptr %arrayidx, align 4, !tbaa !8
     %add1 = add i32 %idx, 2
     %idxprom2 = zext i32 %add1 to i64
-    %arrayidx3 = getelementptr inbounds i32, i32* %ptr, i64 %idxprom2
-    %1 = load i32, i32* %arrayidx3, align 4, !tbaa !8
+    %arrayidx3 = getelementptr inbounds i32, ptr %ptr, i64 %idxprom2
+    %1 = load i32, ptr %arrayidx3, align 4, !tbaa !8
     %add4 = add i32 %1, %0
     ret i32 %add4
   }
 
   ; Function Attrs: norecurse nounwind readonly
-  define zeroext i32 @testLWZX(i32* nocapture readonly %ptr, i32 zeroext %idx) local_unnamed_addr #1 {
+  define zeroext i32 @testLWZX(ptr nocapture readonly %ptr, i32 zeroext %idx) local_unnamed_addr #1 {
   entry:
     %add = add i32 %idx, 1
     %idxprom = zext i32 %add to i64
-    %arrayidx = getelementptr inbounds i32, i32* %ptr, i64 %idxprom
-    %0 = load i32, i32* %arrayidx, align 4, !tbaa !8
+    %arrayidx = getelementptr inbounds i32, ptr %ptr, i64 %idxprom
+    %0 = load i32, ptr %arrayidx, align 4, !tbaa !8
     %add1 = add i32 %idx, 2
     %idxprom2 = zext i32 %add1 to i64
-    %arrayidx3 = getelementptr inbounds i32, i32* %ptr, i64 %idxprom2
-    %1 = load i32, i32* %arrayidx3, align 4, !tbaa !8
+    %arrayidx3 = getelementptr inbounds i32, ptr %ptr, i64 %idxprom2
+    %1 = load i32, ptr %arrayidx3, align 4, !tbaa !8
     %add4 = add i32 %1, %0
     ret i32 %add4
   }
 
   ; Function Attrs: norecurse nounwind readonly
-  define i64 @testLWAX(i32* nocapture readonly %ptr, i32 zeroext %idx) local_unnamed_addr #1 {
+  define i64 @testLWAX(ptr nocapture readonly %ptr, i32 zeroext %idx) local_unnamed_addr #1 {
   entry:
     %add = add i32 %idx, 1
     %idxprom = zext i32 %add to i64
-    %arrayidx = getelementptr inbounds i32, i32* %ptr, i64 %idxprom
-    %0 = load i32, i32* %arrayidx, align 4, !tbaa !8
+    %arrayidx = getelementptr inbounds i32, ptr %ptr, i64 %idxprom
+    %0 = load i32, ptr %arrayidx, align 4, !tbaa !8
     %conv = sext i32 %0 to i64
     %add1 = add i32 %idx, 2
     %idxprom2 = zext i32 %add1 to i64
-    %arrayidx3 = getelementptr inbounds i32, i32* %ptr, i64 %idxprom2
-    %1 = load i32, i32* %arrayidx3, align 4, !tbaa !8
+    %arrayidx3 = getelementptr inbounds i32, ptr %ptr, i64 %idxprom2
+    %1 = load i32, ptr %arrayidx3, align 4, !tbaa !8
     %conv4 = sext i32 %1 to i64
     %add5 = add nsw i64 %conv4, %conv
     ret i64 %add5
   }
 
   ; Function Attrs: norecurse nounwind readonly
-  define i64 @testLDUX(i64* nocapture readonly %ptr, i32 zeroext %idx) local_unnamed_addr #1 {
+  define i64 @testLDUX(ptr nocapture readonly %ptr, i32 zeroext %idx) local_unnamed_addr #1 {
   entry:
     %add = add i32 %idx, 1
     %idxprom = zext i32 %add to i64
-    %arrayidx = getelementptr inbounds i64, i64* %ptr, i64 %idxprom
-    %0 = load i64, i64* %arrayidx, align 8, !tbaa !10
+    %arrayidx = getelementptr inbounds i64, ptr %ptr, i64 %idxprom
+    %0 = load i64, ptr %arrayidx, align 8, !tbaa !10
     %add1 = add i32 %idx, 2
     %idxprom2 = zext i32 %add1 to i64
-    %arrayidx3 = getelementptr inbounds i64, i64* %ptr, i64 %idxprom2
-    %1 = load i64, i64* %arrayidx3, align 8, !tbaa !10
+    %arrayidx3 = getelementptr inbounds i64, ptr %ptr, i64 %idxprom2
+    %1 = load i64, ptr %arrayidx3, align 8, !tbaa !10
     %add4 = add i64 %1, %0
     ret i64 %add4
   }
 
   ; Function Attrs: norecurse nounwind readonly
-  define i64 @testLDX(i64* nocapture readonly %ptr, i32 zeroext %idx) local_unnamed_addr #1 {
+  define i64 @testLDX(ptr nocapture readonly %ptr, i32 zeroext %idx) local_unnamed_addr #1 {
   entry:
     %add = add i32 %idx, 1
     %idxprom = zext i32 %add to i64
-    %arrayidx = getelementptr inbounds i64, i64* %ptr, i64 %idxprom
-    %0 = load i64, i64* %arrayidx, align 8, !tbaa !10
+    %arrayidx = getelementptr inbounds i64, ptr %ptr, i64 %idxprom
+    %0 = load i64, ptr %arrayidx, align 8, !tbaa !10
     %add1 = add i32 %idx, 2
     %idxprom2 = zext i32 %add1 to i64
-    %arrayidx3 = getelementptr inbounds i64, i64* %ptr, i64 %idxprom2
-    %1 = load i64, i64* %arrayidx3, align 8, !tbaa !10
+    %arrayidx3 = getelementptr inbounds i64, ptr %ptr, i64 %idxprom2
+    %1 = load i64, ptr %arrayidx3, align 8, !tbaa !10
     %add4 = add i64 %1, %0
     ret i64 %add4
   }
 
   ; Function Attrs: norecurse nounwind readonly
-  define double @testLFDUX(double* nocapture readonly %ptr, i32 zeroext %idx) local_unnamed_addr #2 {
+  define double @testLFDUX(ptr nocapture readonly %ptr, i32 zeroext %idx) local_unnamed_addr #2 {
   entry:
     %add = add i32 %idx, 1
     %idxprom = zext i32 %add to i64
-    %arrayidx = getelementptr inbounds double, double* %ptr, i64 %idxprom
-    %0 = load double, double* %arrayidx, align 8, !tbaa !12
+    %arrayidx = getelementptr inbounds double, ptr %ptr, i64 %idxprom
+    %0 = load double, ptr %arrayidx, align 8, !tbaa !12
     %add1 = add i32 %idx, 2
     %idxprom2 = zext i32 %add1 to i64
-    %arrayidx3 = getelementptr inbounds double, double* %ptr, i64 %idxprom2
-    %1 = load double, double* %arrayidx3, align 8, !tbaa !12
+    %arrayidx3 = getelementptr inbounds double, ptr %ptr, i64 %idxprom2
+    %1 = load double, ptr %arrayidx3, align 8, !tbaa !12
     %add4 = fadd double %0, %1
     ret double %add4
   }
 
   ; Function Attrs: norecurse nounwind readonly
-  define double @testLFDX(double* nocapture readonly %ptr, i32 zeroext %idx) local_unnamed_addr #2 {
+  define double @testLFDX(ptr nocapture readonly %ptr, i32 zeroext %idx) local_unnamed_addr #2 {
   entry:
     %add = add i32 %idx, 1
     %idxprom = zext i32 %add to i64
-    %arrayidx = getelementptr inbounds double, double* %ptr, i64 %idxprom
-    %0 = load double, double* %arrayidx, align 8, !tbaa !12
+    %arrayidx = getelementptr inbounds double, ptr %ptr, i64 %idxprom
+    %0 = load double, ptr %arrayidx, align 8, !tbaa !12
     %add1 = add i32 %idx, 2
     %idxprom2 = zext i32 %add1 to i64
-    %arrayidx3 = getelementptr inbounds double, double* %ptr, i64 %idxprom2
-    %1 = load double, double* %arrayidx3, align 8, !tbaa !12
+    %arrayidx3 = getelementptr inbounds double, ptr %ptr, i64 %idxprom2
+    %1 = load double, ptr %arrayidx3, align 8, !tbaa !12
     %add4 = fadd double %0, %1
     ret double %add4
   }
 
   ; Function Attrs: norecurse nounwind readonly
-  define <4 x float> @testLFSUX(float* nocapture readonly %ptr, i32 signext %idx) local_unnamed_addr #2 {
+  define <4 x float> @testLFSUX(ptr nocapture readonly %ptr, i32 signext %idx) local_unnamed_addr #2 {
   entry:
     %idxprom = sext i32 %idx to i64
-    %arrayidx = getelementptr inbounds float, float* %ptr, i64 %idxprom
-    %0 = load float, float* %arrayidx, align 4, !tbaa !14
+    %arrayidx = getelementptr inbounds float, ptr %ptr, i64 %idxprom
+    %0 = load float, ptr %arrayidx, align 4, !tbaa !14
     %conv = fptoui float %0 to i32
     %vecinit = insertelement <4 x i32> undef, i32 %conv, i32 0
-    %1 = bitcast float* %ptr to i8*
+    %1 = bitcast ptr %ptr to ptr
     %2 = shl i64 %idxprom, 2
-    %uglygep = getelementptr i8, i8* %1, i64 %2
-    %uglygep2 = getelementptr i8, i8* %uglygep, i64 4
-    %3 = bitcast i8* %uglygep2 to float*
-    %4 = load float, float* %3, align 4, !tbaa !14
+    %uglygep = getelementptr i8, ptr %1, i64 %2
+    %uglygep2 = getelementptr i8, ptr %uglygep, i64 4
+    %3 = bitcast ptr %uglygep2 to ptr
+    %4 = load float, ptr %3, align 4, !tbaa !14
     %conv3 = fptoui float %4 to i32
     %vecinit4 = insertelement <4 x i32> %vecinit, i32 %conv3, i32 1
-    %uglygep5 = getelementptr i8, i8* %uglygep, i64 8
-    %5 = bitcast i8* %uglygep5 to float*
-    %6 = load float, float* %5, align 4, !tbaa !14
+    %uglygep5 = getelementptr i8, ptr %uglygep, i64 8
+    %5 = bitcast ptr %uglygep5 to ptr
+    %6 = load float, ptr %5, align 4, !tbaa !14
     %conv8 = fptoui float %6 to i32
     %vecinit9 = insertelement <4 x i32> %vecinit4, i32 %conv8, i32 2
-    %uglygep8 = getelementptr i8, i8* %uglygep, i64 12
-    %7 = bitcast i8* %uglygep8 to float*
-    %8 = load float, float* %7, align 4, !tbaa !14
+    %uglygep8 = getelementptr i8, ptr %uglygep, i64 12
+    %7 = bitcast ptr %uglygep8 to ptr
+    %8 = load float, ptr %7, align 4, !tbaa !14
     %conv13 = fptoui float %8 to i32
     %vecinit14 = insertelement <4 x i32> %vecinit9, i32 %conv13, i32 3
     %9 = bitcast <4 x i32> %vecinit14 to <4 x float>
@@ -407,61 +407,61 @@
   }
 
   ; Function Attrs: norecurse nounwind readonly
-  define float @testLFSX(float* nocapture readonly %ptr, i32 zeroext %idx) local_unnamed_addr #2 {
+  define float @testLFSX(ptr nocapture readonly %ptr, i32 zeroext %idx) local_unnamed_addr #2 {
   entry:
     %add = add i32 %idx, 1
     %idxprom = zext i32 %add to i64
-    %arrayidx = getelementptr inbounds float, float* %ptr, i64 %idxprom
-    %0 = load float, float* %arrayidx, align 4, !tbaa !14
+    %arrayidx = getelementptr inbounds float, ptr %ptr, i64 %idxprom
+    %0 = load float, ptr %arrayidx, align 4, !tbaa !14
     %add1 = add i32 %idx, 2
     %idxprom2 = zext i32 %add1 to i64
-    %arrayidx3 = getelementptr inbounds float, float* %ptr, i64 %idxprom2
-    %1 = load float, float* %arrayidx3, align 4, !tbaa !14
+    %arrayidx3 = getelementptr inbounds float, ptr %ptr, i64 %idxprom2
+    %1 = load float, ptr %arrayidx3, align 4, !tbaa !14
     %add4 = fadd float %0, %1
     ret float %add4
   }
 
   ; Function Attrs: norecurse nounwind readonly
-  define double @testLXSDX(double* nocapture readonly %ptr, i32 zeroext %idx) local_unnamed_addr #1 {
+  define double @testLXSDX(ptr nocapture readonly %ptr, i32 zeroext %idx) local_unnamed_addr #1 {
   entry:
     %add = add i32 %idx, 1
     %idxprom = zext i32 %add to i64
-    %arrayidx = getelementptr inbounds double, double* %ptr, i64 %idxprom
-    %0 = load double, double* %arrayidx, align 8, !tbaa !12
+    %arrayidx = getelementptr inbounds double, ptr %ptr, i64 %idxprom
+    %0 = load double, ptr %arrayidx, align 8, !tbaa !12
     %add1 = add i32 %idx, 2
     %idxprom2 = zext i32 %add1 to i64
-    %arrayidx3 = getelementptr inbounds double, double* %ptr, i64 %idxprom2
-    %1 = load double, double* %arrayidx3, align 8, !tbaa !12
+    %arrayidx3 = getelementptr inbounds double, ptr %ptr, i64 %idxprom2
+    %1 = load double, ptr %arrayidx3, align 8, !tbaa !12
     %add4 = fadd double %0, %1
     ret double %add4
   }
 
   ; Function Attrs: norecurse nounwind readonly
-  define float @testLXSSPX(float* nocapture readonly %ptr, i32 zeroext %idx) local_unnamed_addr #1 {
+  define float @testLXSSPX(ptr nocapture readonly %ptr, i32 zeroext %idx) local_unnamed_addr #1 {
   entry:
     %add = add i32 %idx, 1
     %idxprom = zext i32 %add to i64
-    %arrayidx = getelementptr inbounds float, float* %ptr, i64 %idxprom
-    %0 = load float, float* %arrayidx, align 4, !tbaa !14
+    %arrayidx = getelementptr inbounds float, ptr %ptr, i64 %idxprom
+    %0 = load float, ptr %arrayidx, align 4, !tbaa !14
     %add1 = add i32 %idx, 2
     %idxprom2 = zext i32 %add1 to i64
-    %arrayidx3 = getelementptr inbounds float, float* %ptr, i64 %idxprom2
-    %1 = load float, float* %arrayidx3, align 4, !tbaa !14
+    %arrayidx3 = getelementptr inbounds float, ptr %ptr, i64 %idxprom2
+    %1 = load float, ptr %arrayidx3, align 4, !tbaa !14
     %add4 = fadd float %0, %1
     ret float %add4
   }
 
   ; Function Attrs: norecurse nounwind readonly
-  define <4 x i32> @testLXVX(<4 x i32>* nocapture readonly %ptr, i32 zeroext %idx) local_unnamed_addr #1 {
+  define <4 x i32> @testLXVX(ptr nocapture readonly %ptr, i32 zeroext %idx) local_unnamed_addr #1 {
   entry:
     %add = add i32 %idx, 1
     %idxprom = zext i32 %add to i64
-    %arrayidx = getelementptr inbounds <4 x i32>, <4 x i32>* %ptr, i64 %idxprom
-    %0 = load <4 x i32>, <4 x i32>* %arrayidx, align 16, !tbaa !3
+    %arrayidx = getelementptr inbounds <4 x i32>, ptr %ptr, i64 %idxprom
+    %0 = load <4 x i32>, ptr %arrayidx, align 16, !tbaa !3
     %add1 = add i32 %idx, 2
     %idxprom2 = zext i32 %add1 to i64
-    %arrayidx3 = getelementptr inbounds <4 x i32>, <4 x i32>* %ptr, i64 %idxprom2
-    %1 = load <4 x i32>, <4 x i32>* %arrayidx3, align 16, !tbaa !3
+    %arrayidx3 = getelementptr inbounds <4 x i32>, ptr %ptr, i64 %idxprom2
+    %1 = load <4 x i32>, ptr %arrayidx3, align 16, !tbaa !3
     %add4 = add <4 x i32> %1, %0
     ret <4 x i32> %add4
   }
@@ -747,197 +747,197 @@
   }
 
   ; Function Attrs: norecurse nounwind
-  define void @testSTBUX(i8* nocapture %ptr, i8 zeroext %a, i32 zeroext %idx) local_unnamed_addr #3 {
+  define void @testSTBUX(ptr nocapture %ptr, i8 zeroext %a, i32 zeroext %idx) local_unnamed_addr #3 {
   entry:
     %add = add i32 %idx, 1
     %idxprom = zext i32 %add to i64
-    %arrayidx = getelementptr inbounds i8, i8* %ptr, i64 %idxprom
-    store i8 %a, i8* %arrayidx, align 1, !tbaa !3
+    %arrayidx = getelementptr inbounds i8, ptr %ptr, i64 %idxprom
+    store i8 %a, ptr %arrayidx, align 1, !tbaa !3
     %add1 = add i32 %idx, 2
     %idxprom2 = zext i32 %add1 to i64
-    %arrayidx3 = getelementptr inbounds i8, i8* %ptr, i64 %idxprom2
-    store i8 %a, i8* %arrayidx3, align 1, !tbaa !3
+    %arrayidx3 = getelementptr inbounds i8, ptr %ptr, i64 %idxprom2
+    store i8 %a, ptr %arrayidx3, align 1, !tbaa !3
     ret void
   }
 
   ; Function Attrs: norecurse nounwind
-  define void @testSTBX(i8* nocapture %ptr, i8 zeroext %a, i32 zeroext %idx) local_unnamed_addr #3 {
+  define void @testSTBX(ptr nocapture %ptr, i8 zeroext %a, i32 zeroext %idx) local_unnamed_addr #3 {
   entry:
     %add = add i32 %idx, 1
     %idxprom = zext i32 %add to i64
-    %arrayidx = getelementptr inbounds i8, i8* %ptr, i64 %idxprom
-    store i8 %a, i8* %arrayidx, align 1, !tbaa !3
+    %arrayidx = getelementptr inbounds i8, ptr %ptr, i64 %idxprom
+    store i8 %a, ptr %arrayidx, align 1, !tbaa !3
     %add1 = add i32 %idx, 2
     %idxprom2 = zext i32 %add1 to i64
-    %arrayidx3 = getelementptr inbounds i8, i8* %ptr, i64 %idxprom2
-    store i8 %a, i8* %arrayidx3, align 1, !tbaa !3
+    %arrayidx3 = getelementptr inbounds i8, ptr %ptr, i64 %idxprom2
+    store i8 %a, ptr %arrayidx3, align 1, !tbaa !3
     ret void
   }
 
   ; Function Attrs: norecurse nounwind
-  define void @testSTHUX(i16* nocapture %ptr, i16 zeroext %a, i32 zeroext %idx) local_unnamed_addr #3 {
+  define void @testSTHUX(ptr nocapture %ptr, i16 zeroext %a, i32 zeroext %idx) local_unnamed_addr #3 {
   entry:
     %add = add i32 %idx, 1
     %idxprom = zext i32 %add to i64
-    %arrayidx = getelementptr inbounds i16, i16* %ptr, i64 %idxprom
-    store i16 %a, i16* %arrayidx, align 2, !tbaa !6
+    %arrayidx = getelementptr inbounds i16, ptr %ptr, i64 %idxprom
+    store i16 %a, ptr %arrayidx, align 2, !tbaa !6
     %add1 = add i32 %idx, 2
     %idxprom2 = zext i32 %add1 to i64
-    %arrayidx3 = getelementptr inbounds i16, i16* %ptr, i64 %idxprom2
-    store i16 %a, i16* %arrayidx3, align 2, !tbaa !6
+    %arrayidx3 = getelementptr inbounds i16, ptr %ptr, i64 %idxprom2
+    store i16 %a, ptr %arrayidx3, align 2, !tbaa !6
     ret void
   }
 
   ; Function Attrs: norecurse nounwind
-  define void @testSTHX(i16* nocapture %ptr, i16 zeroext %a, i32 zeroext %idx) local_unnamed_addr #3 {
+  define void @testSTHX(ptr nocapture %ptr, i16 zeroext %a, i32 zeroext %idx) local_unnamed_addr #3 {
   entry:
     %add = add i32 %idx, 1
     %idxprom = zext i32 %add to i64
-    %arrayidx = getelementptr inbounds i16, i16* %ptr, i64 %idxprom
-    store i16 %a, i16* %arrayidx, align 1, !tbaa !3
+    %arrayidx = getelementptr inbounds i16, ptr %ptr, i64 %idxprom
+    store i16 %a, ptr %arrayidx, align 1, !tbaa !3
     %add1 = add i32 %idx, 2
     %idxprom2 = zext i32 %add1 to i64
-    %arrayidx3 = getelementptr inbounds i16, i16* %ptr, i64 %idxprom2
-    store i16 %a, i16* %arrayidx3, align 1, !tbaa !3
+    %arrayidx3 = getelementptr inbounds i16, ptr %ptr, i64 %idxprom2
+    store i16 %a, ptr %arrayidx3, align 1, !tbaa !3
     ret void
   }
 
   ; Function Attrs: norecurse nounwind
-  define void @testSTWUX(i32* nocapture %ptr, i32 zeroext %a, i32 zeroext %idx) local_unnamed_addr #3 {
+  define void @testSTWUX(ptr nocapture %ptr, i32 zeroext %a, i32 zeroext %idx) local_unnamed_addr #3 {
   entry:
     %add = add i32 %idx, 1
     %idxprom = zext i32 %add to i64
-    %arrayidx = getelementptr inbounds i32, i32* %ptr, i64 %idxprom
-    store i32 %a, i32* %arrayidx, align 4, !tbaa !8
+    %arrayidx = getelementptr inbounds i32, ptr %ptr, i64 %idxprom
+    store i32 %a, ptr %arrayidx, align 4, !tbaa !8
     %add1 = add i32 %idx, 2
     %idxprom2 = zext i32 %add1 to i64
-    %arrayidx3 = getelementptr inbounds i32, i32* %ptr, i64 %idxprom2
-    store i32 %a, i32* %arrayidx3, align 4, !tbaa !8
+    %arrayidx3 = getelementptr inbounds i32, ptr %ptr, i64 %idxprom2
+    store i32 %a, ptr %arrayidx3, align 4, !tbaa !8
     ret void
   }
 
   ; Function Attrs: norecurse nounwind
-  define void @testSTWX(i32* nocapture %ptr, i32 zeroext %a, i32 zeroext %idx) local_unnamed_addr #3 {
+  define void @testSTWX(ptr nocapture %ptr, i32 zeroext %a, i32 zeroext %idx) local_unnamed_addr #3 {
   entry:
     %add = add i32 %idx, 1
     %idxprom = zext i32 %add to i64
-    %arrayidx = getelementptr inbounds i32, i32* %ptr, i64 %idxprom
-    store i32 %a, i32* %arrayidx, align 4, !tbaa !8
+    %arrayidx = getelementptr inbounds i32, ptr %ptr, i64 %idxprom
+    store i32 %a, ptr %arrayidx, align 4, !tbaa !8
     %add1 = add i32 %idx, 2
     %idxprom2 = zext i32 %add1 to i64
-    %arrayidx3 = getelementptr inbounds i32, i32* %ptr, i64 %idxprom2
-    store i32 %a, i32* %arrayidx3, align 4, !tbaa !8
+    %arrayidx3 = getelementptr inbounds i32, ptr %ptr, i64 %idxprom2
+    store i32 %a, ptr %arrayidx3, align 4, !tbaa !8
     ret void
   }
 
   ; Function Attrs: norecurse nounwind
-  define void @testSTDUX(i64* nocapture %ptr, i64 %a, i32 zeroext %idx) local_unnamed_addr #3 {
+  define void @testSTDUX(ptr nocapture %ptr, i64 %a, i32 zeroext %idx) local_unnamed_addr #3 {
   entry:
     %add = add i32 %idx, 1
     %idxprom = zext i32 %add to i64
-    %arrayidx = getelementptr inbounds i64, i64* %ptr, i64 %idxprom
-    store i64 %a, i64* %arrayidx, align 8, !tbaa !10
+    %arrayidx = getelementptr inbounds i64, ptr %ptr, i64 %idxprom
+    store i64 %a, ptr %arrayidx, align 8, !tbaa !10
     %add1 = add i32 %idx, 2
     %idxprom2 = zext i32 %add1 to i64
-    %arrayidx3 = getelementptr inbounds i64, i64* %ptr, i64 %idxprom2
-    store i64 %a, i64* %arrayidx3, align 8, !tbaa !10
+    %arrayidx3 = getelementptr inbounds i64, ptr %ptr, i64 %idxprom2
+    store i64 %a, ptr %arrayidx3, align 8, !tbaa !10
     ret void
   }
 
   ; Function Attrs: norecurse nounwind
-  define void @testSTDX(i64* nocapture %ptr, i64 %a, i32 zeroext %idx) local_unnamed_addr #3 {
+  define void @testSTDX(ptr nocapture %ptr, i64 %a, i32 zeroext %idx) local_unnamed_addr #3 {
   entry:
     %add = add i32 %idx, 1
     %idxprom = zext i32 %add to i64
-    %arrayidx = getelementptr inbounds i64, i64* %ptr, i64 %idxprom
-    store i64 %a, i64* %arrayidx, align 8, !tbaa !10
+    %arrayidx = getelementptr inbounds i64, ptr %ptr, i64 %idxprom
+    store i64 %a, ptr %arrayidx, align 8, !tbaa !10
     %add1 = add i32 %idx, 2
     %idxprom2 = zext i32 %add1 to i64
-    %arrayidx3 = getelementptr inbounds i64, i64* %ptr, i64 %idxprom2
-    store i64 %a, i64* %arrayidx3, align 8, !tbaa !10
+    %arrayidx3 = getelementptr inbounds i64, ptr %ptr, i64 %idxprom2
+    store i64 %a, ptr %arrayidx3, align 8, !tbaa !10
     ret void
   }
 
   ; Function Attrs: norecurse nounwind readonly
-  define void @testSTFSX(float* nocapture %ptr, float %a, i32 zeroext %idx) local_unnamed_addr #2 {
+  define void @testSTFSX(ptr nocapture %ptr, float %a, i32 zeroext %idx) local_unnamed_addr #2 {
   entry:
     %add = add i32 %idx, 1
     %idxprom = zext i32 %add to i64
-    %arrayidx = getelementptr inbounds float, float* %ptr, i64 %idxprom
-    store float %a, float* %arrayidx, align 4, !tbaa !14
+    %arrayidx = getelementptr inbounds float, ptr %ptr, i64 %idxprom
+    store float %a, ptr %arrayidx, align 4, !tbaa !14
     %add1 = add i32 %idx, 2
     %idxprom2 = zext i32 %add1 to i64
-    %arrayidx3 = getelementptr inbounds float, float* %ptr, i64 %idxprom2
-    store float %a, float* %arrayidx3, align 4, !tbaa !14
+    %arrayidx3 = getelementptr inbounds float, ptr %ptr, i64 %idxprom2
+    store float %a, ptr %arrayidx3, align 4, !tbaa !14
     ret void
   }
 
   ; Function Attrs: norecurse nounwind readonly
-  define void @testSTFSUX(float* nocapture %ptr, float %a, i32 zeroext %idx) local_unnamed_addr #2 {
+  define void @testSTFSUX(ptr nocapture %ptr, float %a, i32 zeroext %idx) local_unnamed_addr #2 {
   entry:
     %add = add i32 %idx, 1
     %idxprom = zext i32 %add to i64
-    %arrayidx = getelementptr inbounds float, float* %ptr, i64 %idxprom
-    store float %a, float* %arrayidx, align 4, !tbaa !14
+    %arrayidx = getelementptr inbounds float, ptr %ptr, i64 %idxprom
+    store float %a, ptr %arrayidx, align 4, !tbaa !14
     %add1 = add i32 %idx, 2
     %idxprom2 = zext i32 %add1 to i64
-    %arrayidx3 = getelementptr inbounds float, float* %ptr, i64 %idxprom2
-    store float %a, float* %arrayidx3, align 4, !tbaa !14
+    %arrayidx3 = getelementptr inbounds float, ptr %ptr, i64 %idxprom2
+    store float %a, ptr %arrayidx3, align 4, !tbaa !14
     ret void
   }
 
   ; Function Attrs: norecurse nounwind readonly
-  define void @testSTFDX(double* nocapture %ptr, double %a, i32 zeroext %idx) local_unnamed_addr #2 {
+  define void @testSTFDX(ptr nocapture %ptr, double %a, i32 zeroext %idx) local_unnamed_addr #2 {
   entry:
     %add = add i32 %idx, 1
     %idxprom = zext i32 %add to i64
-    %arrayidx = getelementptr inbounds double, double* %ptr, i64 %idxprom
-    store double %a, double* %arrayidx, align 8, !tbaa !12
+    %arrayidx = getelementptr inbounds double, ptr %ptr, i64 %idxprom
+    store double %a, ptr %arrayidx, align 8, !tbaa !12
     %add1 = add i32 %idx, 2
     %idxprom2 = zext i32 %add1 to i64
-    %arrayidx3 = getelementptr inbounds double, double* %ptr, i64 %idxprom2
-    store double %a, double* %arrayidx3, align 8, !tbaa !12
+    %arrayidx3 = getelementptr inbounds double, ptr %ptr, i64 %idxprom2
+    store double %a, ptr %arrayidx3, align 8, !tbaa !12
     ret void
   }
 
   ; Function Attrs: norecurse nounwind readonly
-  define void @testSTFDUX(double* nocapture %ptr, double %a, i32 zeroext %idx) local_unnamed_addr #2 {
+  define void @testSTFDUX(ptr nocapture %ptr, double %a, i32 zeroext %idx) local_unnamed_addr #2 {
   entry:
     %add = add i32 %idx, 1
     %idxprom = zext i32 %add to i64
-    %arrayidx = getelementptr inbounds double, double* %ptr, i64 %idxprom
-    store double %a, double* %arrayidx, align 8, !tbaa !12
+    %arrayidx = getelementptr inbounds double, ptr %ptr, i64 %idxprom
+    store double %a, ptr %arrayidx, align 8, !tbaa !12
     %add1 = add i32 %idx, 2
     %idxprom2 = zext i32 %add1 to i64
-    %arrayidx3 = getelementptr inbounds double, double* %ptr, i64 %idxprom2
-    store double %a, double* %arrayidx3, align 8, !tbaa !12
+    %arrayidx3 = getelementptr inbounds double, ptr %ptr, i64 %idxprom2
+    store double %a, ptr %arrayidx3, align 8, !tbaa !12
     ret void
   }
 
   ; Function Attrs: norecurse nounwind
-  define void @testSTXSSPX(float* nocapture %ptr, float %a, i32 zeroext %idx) local_unnamed_addr #3 {
+  define void @testSTXSSPX(ptr nocapture %ptr, float %a, i32 zeroext %idx) local_unnamed_addr #3 {
   entry:
     %idxprom = zext i32 %idx to i64
-    %arrayidx = getelementptr inbounds float, float* %ptr, i64 %idxprom
-    store float %a, float* %arrayidx, align 4, !tbaa !14
+    %arrayidx = getelementptr inbounds float, ptr %ptr, i64 %idxprom
+    store float %a, ptr %arrayidx, align 4, !tbaa !14
     ret void
   }
 
   ; Function Attrs: norecurse nounwind
-  define void @testSTXSDX(double* nocapture %ptr, double %a, i32 zeroext %idx) local_unnamed_addr #3 {
+  define void @testSTXSDX(ptr nocapture %ptr, double %a, i32 zeroext %idx) local_unnamed_addr #3 {
   entry:
     %idxprom = zext i32 %idx to i64
-    %arrayidx = getelementptr inbounds double, double* %ptr, i64 %idxprom
-    store double %a, double* %arrayidx, align 8, !tbaa !12
+    %arrayidx = getelementptr inbounds double, ptr %ptr, i64 %idxprom
+    store double %a, ptr %arrayidx, align 8, !tbaa !12
     ret void
   }
 
   ; Function Attrs: norecurse nounwind
-  define void @testSTXVX(<4 x i32>* nocapture %ptr, <4 x i32> %a, i32 zeroext %idx) local_unnamed_addr #3 {
+  define void @testSTXVX(ptr nocapture %ptr, <4 x i32> %a, i32 zeroext %idx) local_unnamed_addr #3 {
   entry:
     %idxprom = zext i32 %idx to i64
-    %arrayidx = getelementptr inbounds <4 x i32>, <4 x i32>* %ptr, i64 %idxprom
-    store <4 x i32> %a, <4 x i32>* %arrayidx, align 16, !tbaa !3
+    %arrayidx = getelementptr inbounds <4 x i32>, ptr %ptr, i64 %idxprom
+    store <4 x i32> %a, ptr %arrayidx, align 16, !tbaa !3
     ret void
   }
 

diff --git a/llvm/test/CodeGen/PowerPC/expand-foldable-isel.ll b/llvm/test/CodeGen/PowerPC/expand-foldable-isel.ll
index 4377e800c2548..8da7519fa6dc7 100644
--- a/llvm/test/CodeGen/PowerPC/expand-foldable-isel.ll
+++ b/llvm/test/CodeGen/PowerPC/expand-foldable-isel.ll
@@ -17,11 +17,11 @@ target triple = "powerpc64le-unknown-linux-gnu"
 
 ; RUN: llc -verify-machineinstrs -O2 -ppc-asm-full-reg-names -mcpu=pwr7 -ppc-gen-isel=true < %s | FileCheck %s --check-prefix=CHECK-GEN-ISEL-TRUE
 ; RUN: llc -verify-machineinstrs -O2 -ppc-asm-full-reg-names -mcpu=pwr7 -ppc-gen-isel=false < %s | FileCheck %s --implicit-check-not isel
-%"struct.pov::ot_block_struct" = type { %"struct.pov::ot_block_struct"*, [3 x double], [3 x double], float, float, float, float, float, float, float, float, float, [3 x float], float, float, [3 x double], i16 }
-%"struct.pov::ot_node_struct" = type { %"struct.pov::ot_id_struct", %"struct.pov::ot_block_struct"*, [8 x %"struct.pov::ot_node_struct"*] }
+%"struct.pov::ot_block_struct" = type { ptr, [3 x double], [3 x double], float, float, float, float, float, float, float, float, float, [3 x float], float, float, [3 x double], i16 }
+%"struct.pov::ot_node_struct" = type { %"struct.pov::ot_id_struct", ptr, [8 x ptr] }
 %"struct.pov::ot_id_struct" = type { i32, i32, i32, i32 }
 
-define void @_ZN3pov6ot_insEPPNS_14ot_node_structEPNS_15ot_block_structEPNS_12ot_id_structE(%"struct.pov::ot_block_struct"* %new_block) {
+define void @_ZN3pov6ot_insEPPNS_14ot_node_structEPNS_15ot_block_structEPNS_12ot_id_structE(ptr %new_block) {
 ; CHECK-GEN-ISEL-TRUE-LABEL: _ZN3pov6ot_insEPPNS_14ot_node_structEPNS_15ot_block_structEPNS_12ot_id_structE:
 ; CHECK-GEN-ISEL-TRUE:       # %bb.0: # %entry
 ; CHECK-GEN-ISEL-TRUE-NEXT:    mflr r0
@@ -115,21 +115,21 @@ entry:
   br label %while.cond11
 
 while.cond11:
-  %this_node.0250 = phi %"struct.pov::ot_node_struct"* [ undef, %entry ], [ %1, %cond.false21.i156 ], [ %1, %cond.true18.i153 ]
+  %this_node.0250 = phi ptr [ undef, %entry ], [ %1, %cond.false21.i156 ], [ %1, %cond.true18.i153 ]
   %temp_id.sroa.21.1 = phi i32 [ undef, %entry ], [ %shr2039.i152, %cond.true18.i153 ], [ %div24.i155, %cond.false21.i156 ]
-  %0 = load i32, i32* undef, align 4
+  %0 = load i32, ptr undef, align 4
   %cmp17 = icmp eq i32 0, %0
   br i1 %cmp17, label %lor.rhs, label %while.body21
 
 lor.rhs:
-  %Values = getelementptr inbounds %"struct.pov::ot_node_struct", %"struct.pov::ot_node_struct"* %this_node.0250, i64 0, i32 1
-  store %"struct.pov::ot_block_struct"* %new_block, %"struct.pov::ot_block_struct"** %Values, align 8
+  %Values = getelementptr inbounds %"struct.pov::ot_node_struct", ptr %this_node.0250, i64 0, i32 1
+  store ptr %new_block, ptr %Values, align 8
   ret void
 
 while.body21:
-  %call.i84 = tail call i8* @ZN3pov10pov_callocEmmPKciS1_pov()
-  store i8* %call.i84, i8** undef, align 8
-  %1 = bitcast i8* %call.i84 to %"struct.pov::ot_node_struct"*
+  %call.i84 = tail call ptr @ZN3pov10pov_callocEmmPKciS1_pov()
+  store ptr %call.i84, ptr undef, align 8
+  %1 = bitcast ptr %call.i84 to ptr
   br i1 undef, label %cond.true18.i153, label %cond.false21.i156
 
 cond.true18.i153:
@@ -142,4 +142,4 @@ cond.false21.i156:
   br label %while.cond11
 }
 
-declare i8* @ZN3pov10pov_callocEmmPKciS1_pov()
+declare ptr @ZN3pov10pov_callocEmmPKciS1_pov()

diff --git a/llvm/test/CodeGen/PowerPC/fast-isel-branch.ll b/llvm/test/CodeGen/PowerPC/fast-isel-branch.ll
index 98f08830e3c82..4fc282befe6cd 100644
--- a/llvm/test/CodeGen/PowerPC/fast-isel-branch.ll
+++ b/llvm/test/CodeGen/PowerPC/fast-isel-branch.ll
@@ -76,24 +76,24 @@ define signext i32 @bar() #0 {
 entry:
   %retval = alloca i32, align 4
   %i = alloca i32, align 4
-  store i32 0, i32* %retval, align 4
-  store i32 0, i32* %i, align 4
+  store i32 0, ptr %retval, align 4
+  store i32 0, ptr %i, align 4
   br label %for.cond
 
 for.cond:
-  %0 = load i32, i32* %i, align 4
-  %1 = load i32, i32* @x, align 4
+  %0 = load i32, ptr %i, align 4
+  %1 = load i32, ptr @x, align 4
   %cmp = icmp slt i32 %0, %1
   br i1 %cmp, label %for.body, label %for.end
 
 for.body:
-  call void bitcast (void (...)* @foo to void ()*)()
+  call void @foo()
   br label %for.inc
 
 for.inc:
-  %2 = load i32, i32* %i, align 4
+  %2 = load i32, ptr %i, align 4
   %inc = add nsw i32 %2, 1
-  store i32 %inc, i32* %i, align 4
+  store i32 %inc, ptr %i, align 4
   br label %for.cond
 
 for.end:

diff --git a/llvm/test/CodeGen/PowerPC/lsr-insns-cost.ll b/llvm/test/CodeGen/PowerPC/lsr-insns-cost.ll
index 29d51e49ab240..bc6e6e51d763a 100644
--- a/llvm/test/CodeGen/PowerPC/lsr-insns-cost.ll
+++ b/llvm/test/CodeGen/PowerPC/lsr-insns-cost.ll
@@ -11,7 +11,7 @@
 ;
 ; compile with -fno-unroll-loops
 
-define void @lsr-insts-cost(i32* %0, i32* %1, i32* %2) {
+define void @lsr-insts-cost(ptr %0, ptr %1, ptr %2) {
 ; INST-LABEL: lsr-insts-cost
 ; INST:       .LBB0_4: # =>This Inner Loop Header: Depth=1
 ; INST-NEXT:    lxvd2x vs34, r3, r6
@@ -31,30 +31,30 @@ define void @lsr-insts-cost(i32* %0, i32* %1, i32* %2) {
 ; REG-NEXT:    stxvd2x vs34, 0, r5
 ; REG-NEXT:    addi r5, r5, 16
 ; REG-NEXT:    bdnz .LBB0_4
-  %4 = getelementptr i32, i32* %2, i64 1024
-  %5 = getelementptr i32, i32* %0, i64 1024
-  %6 = getelementptr i32, i32* %1, i64 1024
-  %7 = icmp ugt i32* %5, %2
-  %8 = icmp ugt i32* %4, %0
+  %4 = getelementptr i32, ptr %2, i64 1024
+  %5 = getelementptr i32, ptr %0, i64 1024
+  %6 = getelementptr i32, ptr %1, i64 1024
+  %7 = icmp ugt ptr %5, %2
+  %8 = icmp ugt ptr %4, %0
   %9 = and i1 %7, %8
-  %10 = icmp ugt i32* %6, %2
-  %11 = icmp ugt i32* %4, %1
+  %10 = icmp ugt ptr %6, %2
+  %11 = icmp ugt ptr %4, %1
   %12 = and i1 %10, %11
   %13 = or i1 %9, %12
   br i1 %13, label %28, label %14
 
 14:                                               ; preds = %3, %14
   %15 = phi i64 [ %25, %14 ], [ 0, %3 ]
-  %16 = getelementptr inbounds i32, i32* %0, i64 %15
-  %17 = bitcast i32* %16 to <4 x i32>*
-  %18 = load <4 x i32>, <4 x i32>* %17, align 4
-  %19 = getelementptr inbounds i32, i32* %1, i64 %15
-  %20 = bitcast i32* %19 to <4 x i32>*
-  %21 = load <4 x i32>, <4 x i32>* %20, align 4
+  %16 = getelementptr inbounds i32, ptr %0, i64 %15
+  %17 = bitcast ptr %16 to ptr
+  %18 = load <4 x i32>, ptr %17, align 4
+  %19 = getelementptr inbounds i32, ptr %1, i64 %15
+  %20 = bitcast ptr %19 to ptr
+  %21 = load <4 x i32>, ptr %20, align 4
   %22 = add <4 x i32> %21, %18
-  %23 = getelementptr inbounds i32, i32* %2, i64 %15
-  %24 = bitcast i32* %23 to <4 x i32>*
-  store <4 x i32> %22, <4 x i32>* %24, align 4
+  %23 = getelementptr inbounds i32, ptr %2, i64 %15
+  %24 = bitcast ptr %23 to ptr
+  store <4 x i32> %22, ptr %24, align 4
   %25 = add i64 %15, 4
   %26 = icmp eq i64 %25, 1024
   br i1 %26, label %27, label %14
@@ -64,13 +64,13 @@ define void @lsr-insts-cost(i32* %0, i32* %1, i32* %2) {
 
 28:                                               ; preds = %3, %28
   %29 = phi i64 [ %36, %28 ], [ 0, %3 ]
-  %30 = getelementptr inbounds i32, i32* %0, i64 %29
-  %31 = load i32, i32* %30, align 4
-  %32 = getelementptr inbounds i32, i32* %1, i64 %29
-  %33 = load i32, i32* %32, align 4
+  %30 = getelementptr inbounds i32, ptr %0, i64 %29
+  %31 = load i32, ptr %30, align 4
+  %32 = getelementptr inbounds i32, ptr %1, i64 %29
+  %33 = load i32, ptr %32, align 4
   %34 = add i32 %33, %31
-  %35 = getelementptr inbounds i32, i32* %2, i64 %29
-  store i32 %34, i32* %35, align 4
+  %35 = getelementptr inbounds i32, ptr %2, i64 %29
+  store i32 %34, ptr %35, align 4
   %36 = add nuw nsw i64 %29, 1
   %37 = icmp eq i64 %36, 1024
   br i1 %37, label %27, label %28

diff --git a/llvm/test/CodeGen/PowerPC/ppc-TOC-stats.ll b/llvm/test/CodeGen/PowerPC/ppc-TOC-stats.ll
index 1337c8eb47755..e389b2d9d1716 100644
--- a/llvm/test/CodeGen/PowerPC/ppc-TOC-stats.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc-TOC-stats.ll
@@ -233,12 +233,12 @@ return:                                           ; preds = %if.else3, %if.else,
 }
 
 
-define i8* @testBlockAddr() {
+define ptr @testBlockAddr() {
 entry:
   br label %here
 
 here:
-  ret i8* blockaddress(@testBlockAddr, %here)
+  ret ptr blockaddress(@testBlockAddr, %here)
 }
 
 define noundef signext i32 @_Z5getG4v() local_unnamed_addr {

diff --git a/llvm/test/CodeGen/PowerPC/ppc32-selectcc-i64.ll b/llvm/test/CodeGen/PowerPC/ppc32-selectcc-i64.ll
index 801944315de00..3baac2cbc5450 100644
--- a/llvm/test/CodeGen/PowerPC/ppc32-selectcc-i64.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc32-selectcc-i64.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -verify-machineinstrs -mtriple=powerpc -mcpu=pwr7 < %s | FileCheck %s
 
 ; This piece of IR is expanded from memcmp.
-define i1 @cmp(i8* %a, i8* %b) {
+define i1 @cmp(ptr %a, ptr %b) {
 ; CHECK-LABEL: cmp:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lwz 5, 4(3)
@@ -39,18 +39,18 @@ res_block:
   br label %endblock
 
 loadbb:
-  %2 = bitcast i8* %a to i64*
-  %3 = bitcast i8* %b to i64*
-  %4 = load i64, i64* %2, align 1
-  %5 = load i64, i64* %3, align 1
+  %2 = bitcast ptr %a to ptr
+  %3 = bitcast ptr %b to ptr
+  %4 = load i64, ptr %2, align 1
+  %5 = load i64, ptr %3, align 1
   %6 = icmp eq i64 %4, %5
   br i1 %6, label %loadbb1, label %res_block
 
 loadbb1:
-  %7 = getelementptr i8, i8* %a, i64 8
-  %8 = getelementptr i8, i8* %b, i64 8
-  %9 = load i8, i8* %7, align 1
-  %10 = load i8, i8* %8, align 1
+  %7 = getelementptr i8, ptr %a, i64 8
+  %8 = getelementptr i8, ptr %b, i64 8
+  %9 = load i8, ptr %7, align 1
+  %10 = load i8, ptr %8, align 1
   %11 = zext i8 %9 to i32
   %12 = zext i8 %10 to i32
   %13 = sub i32 %11, %12

diff --git a/llvm/test/CodeGen/PowerPC/preincprep-i64-check.ll b/llvm/test/CodeGen/PowerPC/preincprep-i64-check.ll
index 64166a02aa851..2b2bd8b96eb38 100644
--- a/llvm/test/CodeGen/PowerPC/preincprep-i64-check.ll
+++ b/llvm/test/CodeGen/PowerPC/preincprep-i64-check.ll
@@ -8,24 +8,24 @@
 
 @result = local_unnamed_addr global i64 0, align 8
 
-define i64 @test_preinc_i64_ld(i8* nocapture readonly, i64) local_unnamed_addr {
+define i64 @test_preinc_i64_ld(ptr nocapture readonly, i64) local_unnamed_addr {
   %3 = icmp eq i64 %1, 0
   br i1 %3, label %4, label %6
 
 ; <label>:4:                                      ; preds = %2
-  %5 = load i64, i64* @result, align 8
+  %5 = load i64, ptr @result, align 8
   br label %13
 
 ; <label>:6:                                      ; preds = %2
-  %7 = getelementptr inbounds i8, i8* %0, i64 -50000
-  %8 = getelementptr inbounds i8, i8* %0, i64 -61024
-  %9 = getelementptr inbounds i8, i8* %0, i64 -62048
-  %10 = getelementptr inbounds i8, i8* %0, i64 -64096
-  %11 = load i64, i64* @result, align 8
+  %7 = getelementptr inbounds i8, ptr %0, i64 -50000
+  %8 = getelementptr inbounds i8, ptr %0, i64 -61024
+  %9 = getelementptr inbounds i8, ptr %0, i64 -62048
+  %10 = getelementptr inbounds i8, ptr %0, i64 -64096
+  %11 = load i64, ptr @result, align 8
   br label %15
 
 ; <label>:12:                                     ; preds = %15
-  store i64 %33, i64* @result, align 8
+  store i64 %33, ptr @result, align 8
   br label %13
 
 ; <label>:13:                                     ; preds = %12, %4
@@ -35,18 +35,18 @@ define i64 @test_preinc_i64_ld(i8* nocapture readonly, i64) local_unnamed_addr {
 ; <label>:15:                                     ; preds = %15, %6
   %16 = phi i64 [ %11, %6 ], [ %33, %15 ]
   %17 = phi i64 [ 0, %6 ], [ %34, %15 ]
-  %18 = getelementptr inbounds i8, i8* %7, i64 %17
-  %19 = bitcast i8* %18 to i64*
-  %20 = load i64, i64* %19, align 8
-  %21 = getelementptr inbounds i8, i8* %8, i64 %17
-  %22 = bitcast i8* %21 to i64*
-  %23 = load i64, i64* %22, align 8
-  %24 = getelementptr inbounds i8, i8* %9, i64 %17
-  %25 = bitcast i8* %24 to i64*
-  %26 = load i64, i64* %25, align 8
-  %27 = getelementptr inbounds i8, i8* %10, i64 %17
-  %28 = bitcast i8* %27 to i64*
-  %29 = load i64, i64* %28, align 8
+  %18 = getelementptr inbounds i8, ptr %7, i64 %17
+  %19 = bitcast ptr %18 to ptr
+  %20 = load i64, ptr %19, align 8
+  %21 = getelementptr inbounds i8, ptr %8, i64 %17
+  %22 = bitcast ptr %21 to ptr
+  %23 = load i64, ptr %22, align 8
+  %24 = getelementptr inbounds i8, ptr %9, i64 %17
+  %25 = bitcast ptr %24 to ptr
+  %26 = load i64, ptr %25, align 8
+  %27 = getelementptr inbounds i8, ptr %10, i64 %17
+  %28 = bitcast ptr %27 to ptr
+  %29 = load i64, ptr %28, align 8
   %30 = mul i64 %23, %20
   %31 = mul i64 %30, %26
   %32 = mul i64 %31, %29
@@ -66,26 +66,26 @@ define i64 @test_preinc_i64_ld(i8* nocapture readonly, i64) local_unnamed_addr {
 ; CHECK-DAG: ld {{[0-9]+}},  0([[REG1]])
 ; CHECK: blr
 
-define i64 @test_preinc_i64_ldst(i8* nocapture, i64, i64) local_unnamed_addr {
+define i64 @test_preinc_i64_ldst(ptr nocapture, i64, i64) local_unnamed_addr {
   %4 = icmp eq i64 %1, 0
   br i1 %4, label %5, label %7
 
 ; <label>:5:                                      ; preds = %3
-  %6 = load i64, i64* @result, align 8
+  %6 = load i64, ptr @result, align 8
   br label %16
 
 ; <label>:7:                                      ; preds = %3
   %8 = add i64 %2, 1
-  %9 = getelementptr inbounds i8, i8* %0, i64 -1024
+  %9 = getelementptr inbounds i8, ptr %0, i64 -1024
   %10 = add i64 %2, 2
-  %11 = getelementptr inbounds i8, i8* %0, i64 -2048
-  %12 = getelementptr inbounds i8, i8* %0, i64 -3072
-  %13 = getelementptr inbounds i8, i8* %0, i64 -4096
-  %14 = load i64, i64* @result, align 8
+  %11 = getelementptr inbounds i8, ptr %0, i64 -2048
+  %12 = getelementptr inbounds i8, ptr %0, i64 -3072
+  %13 = getelementptr inbounds i8, ptr %0, i64 -4096
+  %14 = load i64, ptr @result, align 8
   br label %18
 
 ; <label>:15:                                     ; preds = %18
-  store i64 %32, i64* @result, align 8
+  store i64 %32, ptr @result, align 8
   br label %16
 
 ; <label>:16:                                     ; preds = %15, %5
@@ -95,18 +95,18 @@ define i64 @test_preinc_i64_ldst(i8* nocapture, i64, i64) local_unnamed_addr {
 ; <label>:18:                                     ; preds = %18, %7
   %19 = phi i64 [ %14, %7 ], [ %32, %18 ]
   %20 = phi i64 [ 0, %7 ], [ %33, %18 ]
-  %21 = getelementptr inbounds i8, i8* %9, i64 %20
-  %22 = bitcast i8* %21 to i64*
-  store i64 %8, i64* %22, align 8
-  %23 = getelementptr inbounds i8, i8* %11, i64 %20
-  %24 = bitcast i8* %23 to i64*
-  store i64 %10, i64* %24, align 8
-  %25 = getelementptr inbounds i8, i8* %12, i64 %20
-  %26 = bitcast i8* %25 to i64*
-  %27 = load i64, i64* %26, align 8
-  %28 = getelementptr inbounds i8, i8* %13, i64 %20
-  %29 = bitcast i8* %28 to i64*
-  %30 = load i64, i64* %29, align 8
+  %21 = getelementptr inbounds i8, ptr %9, i64 %20
+  %22 = bitcast ptr %21 to ptr
+  store i64 %8, ptr %22, align 8
+  %23 = getelementptr inbounds i8, ptr %11, i64 %20
+  %24 = bitcast ptr %23 to ptr
+  store i64 %10, ptr %24, align 8
+  %25 = getelementptr inbounds i8, ptr %12, i64 %20
+  %26 = bitcast ptr %25 to ptr
+  %27 = load i64, ptr %26, align 8
+  %28 = getelementptr inbounds i8, ptr %13, i64 %20
+  %29 = bitcast ptr %28 to ptr
+  %30 = load i64, ptr %29, align 8
   %31 = mul i64 %30, %27
   %32 = mul i64 %31, %19
   %33 = add nuw i64 %20, 1

diff --git a/llvm/test/CodeGen/PowerPC/preincprep-nontrans-crash.ll b/llvm/test/CodeGen/PowerPC/preincprep-nontrans-crash.ll
index 8feb62b8a1d50..f802a88cf123c 100644
--- a/llvm/test/CodeGen/PowerPC/preincprep-nontrans-crash.ll
+++ b/llvm/test/CodeGen/PowerPC/preincprep-nontrans-crash.ll
@@ -15,7 +15,7 @@ define void @ety2_() #0 {
 ; CHECK-LABEL: @ety2_
 
 L.entry:
-  %0 = load i32, i32* undef, align 4
+  %0 = load i32, ptr undef, align 4
   %1 = sext i32 %0 to i64
   %2 = shl nsw i64 %1, 3
   %3 = add nsw i64 %2, 8
@@ -50,11 +50,11 @@ L.LB1_769:                                        ; preds = %L.LB1_432
   %9 = mul i64 %8, %1
   %10 = add i64 %9, %7
   %11 = shl i64 %10, 3
-  %12 = getelementptr i8, i8* undef, i64 %11
+  %12 = getelementptr i8, ptr undef, i64 %11
   %13 = mul nsw i64 %6, %1
   %14 = add i64 %7, %13
   %15 = shl i64 %14, 3
-  %16 = getelementptr i8, i8* undef, i64 %15
+  %16 = getelementptr i8, ptr undef, i64 %15
   br i1 undef, label %L.LB1_662, label %L.LB1_662.prol
 
 L.LB1_662.prol:                                   ; preds = %L.LB1_662.prol, %L.LB1_769
@@ -65,24 +65,24 @@ L.LB1_662:                                        ; preds = %L.LB1_437.2, %L.LB1
   %indvars.iv19 = phi i64 [ %indvars.iv.next20.3, %L.LB1_437.2 ], [ 0, %L.LB1_769 ], [ %indvars.iv.next20.prol, %L.LB1_662.prol ]
   %indvars.iv.next20 = add nuw nsw i64 %indvars.iv19, 1
   %17 = mul i64 %indvars.iv.next20, %3
-  %18 = getelementptr i8, i8* %16, i64 %17
-  %19 = bitcast i8* %18 to double*
-  store double 0.000000e+00, double* %19, align 8
+  %18 = getelementptr i8, ptr %16, i64 %17
+  %19 = bitcast ptr %18 to ptr
+  store double 0.000000e+00, ptr %19, align 8
   %indvars.iv.next20.1 = add nsw i64 %indvars.iv19, 2
   %20 = mul i64 %indvars.iv.next20.1, %3
   br i1 undef, label %L.LB1_437.2, label %L.LB1_824.2
 
 L.LB1_427:                                        ; preds = %L.LB1_425
-  %21 = load i64, i64* bitcast (i8* getelementptr inbounds (%struct.BSS1.0.9.28.39.43.46.47.54.56.57.64.65.69.71.144, %struct.BSS1.0.9.28.39.43.46.47.54.56.57.64.65.69.71.144* @.BSS1, i64 0, i32 0, i64 8) to i64*), align 8
+  %21 = load i64, ptr getelementptr inbounds (%struct.BSS1.0.9.28.39.43.46.47.54.56.57.64.65.69.71.144, ptr @.BSS1, i64 0, i32 0, i64 8), align 8
   br label %L.LB1_425
 
 L.LB1_805:                                        ; preds = %L.LB1_816
   ret void
 
 L.LB1_824.2:                                      ; preds = %L.LB1_662
-  %22 = getelementptr i8, i8* %12, i64 %20
-  %23 = bitcast i8* %22 to double*
-  store double 0.000000e+00, double* %23, align 8
+  %22 = getelementptr i8, ptr %12, i64 %20
+  %23 = bitcast ptr %22 to ptr
+  store double 0.000000e+00, ptr %23, align 8
   br label %L.LB1_437.2
 
 L.LB1_437.2:                                      ; preds = %L.LB1_824.2, %L.LB1_662

diff --git a/llvm/test/CodeGen/PowerPC/sink-down-more-instructions-1.mir b/llvm/test/CodeGen/PowerPC/sink-down-more-instructions-1.mir
index a726027270b32..fe50d05ab93a5 100644
--- a/llvm/test/CodeGen/PowerPC/sink-down-more-instructions-1.mir
+++ b/llvm/test/CodeGen/PowerPC/sink-down-more-instructions-1.mir
@@ -9,7 +9,7 @@
   target triple = "powerpc64le-unknown-linux-gnu"
 
   ; Function Attrs: nofree norecurse nounwind
-  define dso_local signext i32 @foo(i32 signext %0, i32 signext %1, i32* nocapture readonly %2, i32* nocapture %3, i32 signext %4) local_unnamed_addr #0 {
+  define dso_local signext i32 @foo(i32 signext %0, i32 signext %1, ptr nocapture readonly %2, ptr nocapture %3, i32 signext %4) local_unnamed_addr #0 {
     %6 = icmp sgt i32 %4, 0
     br i1 %6, label %7, label %37
 
@@ -20,10 +20,10 @@
 
   10:                                               ; preds = %7
     %11 = and i64 %8, 4294967294
-    %scevgep20 = getelementptr i32, i32* %2, i64 -2
-    %scevgep2021 = bitcast i32* %scevgep20 to i8*
-    %scevgep22 = getelementptr i32, i32* %3, i64 -2
-    %scevgep2223 = bitcast i32* %scevgep22 to i8*
+    %scevgep20 = getelementptr i32, ptr %2, i64 -2
+    %scevgep2021 = bitcast ptr %scevgep20 to ptr
+    %scevgep22 = getelementptr i32, ptr %3, i64 -2
+    %scevgep2223 = bitcast ptr %scevgep22 to ptr
     %12 = add nsw i64 %11, -2
     %13 = lshr i64 %12, 1
     %14 = add nuw i64 %13, 1
@@ -43,8 +43,8 @@
     br i1 %22, label %37, label %23
 
   23:                                               ; preds = %17
-    %24 = getelementptr inbounds i32, i32* %2, i64 %18
-    %25 = load i32, i32* %24, align 4, !tbaa !2
+    %24 = getelementptr inbounds i32, ptr %2, i64 %18
+    %25 = load i32, ptr %24, align 4, !tbaa !2
     %26 = add nsw i32 %25, %20
     switch i32 %0, label %30 [
       i32 1, label %27
@@ -64,8 +64,8 @@
   33:                                               ; preds = %30, %27, %23
     %34 = phi i32 [ %32, %30 ], [ %29, %27 ], [ %19, %23 ]
     %35 = add nsw i32 %34, %26
-    %36 = getelementptr inbounds i32, i32* %3, i64 %18
-    store i32 %35, i32* %36, align 4, !tbaa !2
+    %36 = getelementptr inbounds i32, ptr %3, i64 %18
+    store i32 %35, ptr %36, align 4, !tbaa !2
     br label %37
 
   37:                                               ; preds = %33, %17, %5
@@ -74,18 +74,18 @@
   38:                                               ; preds = %74, %10
     %39 = phi i64 [ 0, %10 ], [ %78, %74 ]
     %40 = phi i32 [ 0, %10 ], [ %66, %74 ]
-    %41 = phi i8* [ %scevgep2021, %10 ], [ %45, %74 ]
-    %42 = phi i8* [ %scevgep2223, %10 ], [ %43, %74 ]
-    %43 = getelementptr i8, i8* %42, i64 8
-    %44 = bitcast i8* %43 to i32*
-    %45 = getelementptr i8, i8* %41, i64 8
-    %46 = bitcast i8* %45 to i32*
+    %41 = phi ptr [ %scevgep2021, %10 ], [ %45, %74 ]
+    %42 = phi ptr [ %scevgep2223, %10 ], [ %43, %74 ]
+    %43 = getelementptr i8, ptr %42, i64 8
+    %44 = bitcast ptr %43 to ptr
+    %45 = getelementptr i8, ptr %41, i64 8
+    %46 = bitcast ptr %45 to ptr
     %lsr19 = trunc i64 %39 to i32
     %47 = udiv i32 %lsr19, 30
     %48 = mul nsw i32 %47, -30
     %49 = zext i32 %48 to i64
     %50 = add nuw nsw i64 %49, 1
-    %51 = load i32, i32* %46, align 4, !tbaa !2
+    %51 = load i32, ptr %46, align 4, !tbaa !2
     %52 = add nsw i32 %51, %40
     switch i32 %0, label %58 [
       i32 1, label %53
@@ -109,11 +109,11 @@
   60:                                               ; preds = %58, %56, %53
     %61 = phi i32 [ %tmp15, %58 ], [ %57, %56 ], [ %55, %53 ]
     %62 = add nsw i32 %61, %52
-    store i32 %62, i32* %44, align 4, !tbaa !2
+    store i32 %62, ptr %44, align 4, !tbaa !2
     %63 = or i64 %39, 1
-    %64 = getelementptr i8, i8* %45, i64 4
-    %uglygep1112.cast = bitcast i8* %64 to i32*
-    %65 = load i32, i32* %uglygep1112.cast, align 4, !tbaa !2
+    %64 = getelementptr i8, ptr %45, i64 4
+    %uglygep1112.cast = bitcast ptr %64 to ptr
+    %65 = load i32, ptr %uglygep1112.cast, align 4, !tbaa !2
     %66 = add nsw i32 %65, %52
     switch i32 %0, label %72 [
       i32 1, label %69
@@ -137,9 +137,9 @@
   74:                                               ; preds = %72, %69, %67
     %75 = phi i32 [ %tmp, %72 ], [ %68, %67 ], [ %71, %69 ]
     %76 = add nsw i32 %75, %66
-    %77 = getelementptr i8, i8* %43, i64 4
-    %uglygep78.cast = bitcast i8* %77 to i32*
-    store i32 %76, i32* %uglygep78.cast, align 4, !tbaa !2
+    %77 = getelementptr i8, ptr %43, i64 4
+    %uglygep78.cast = bitcast ptr %77 to ptr
+    store i32 %76, ptr %uglygep78.cast, align 4, !tbaa !2
     %78 = add nuw nsw i64 %39, 2
     %79 = add i64 %78, -2
     %tmp18 = trunc i64 %79 to i32

diff --git a/llvm/test/CodeGen/PowerPC/sink-down-more-instructions-regpressure-high.mir b/llvm/test/CodeGen/PowerPC/sink-down-more-instructions-regpressure-high.mir
index f1a3f58531d5c..4069fec4216e3 100644
--- a/llvm/test/CodeGen/PowerPC/sink-down-more-instructions-regpressure-high.mir
+++ b/llvm/test/CodeGen/PowerPC/sink-down-more-instructions-regpressure-high.mir
@@ -12,18 +12,18 @@
   ; register pressure in destination block.
 
   ; Function Attrs: nofree norecurse nounwind
-  define dso_local signext i32 @foo(i32 signext %0, i32 signext %1, i32* nocapture readonly %2, i32* nocapture %3, i32 signext %4, i32* nocapture readonly %5, i32* nocapture readonly %6, i32* nocapture readonly %7, i32* nocapture readonly %8, i32* nocapture readonly %9, i32* nocapture readonly %10, i32* nocapture readonly %11, i32* nocapture readonly %12, i32* nocapture readonly %13, i32* nocapture readonly %14, i32* nocapture readonly %15, i32* nocapture readonly %16, i32* nocapture readonly %17, i32* nocapture readonly %18, i32* nocapture readonly %19, i32* nocapture readonly %20, i32* nocapture readonly %21, i32* nocapture readonly %22, i32* nocapture readonly %23, i32* nocapture readonly %24, i32* nocapture readonly %25, i32* nocapture readonly %26, i32* nocapture readonly %27, i32* nocapture readonly %28, i32* nocapture readonly %29, i32* nocapture readonly %30, i32* nocapture readonly %31, i32* nocapture readonly %32, i32* nocapture readonly %33, i32* nocapture readonly %34, i32* nocapture readonly %35, i32* nocapture readonly %36) local_unnamed_addr #0 {
+  define dso_local signext i32 @foo(i32 signext %0, i32 signext %1, ptr nocapture readonly %2, ptr nocapture %3, i32 signext %4, ptr nocapture readonly %5, ptr nocapture readonly %6, ptr nocapture readonly %7, ptr nocapture readonly %8, ptr nocapture readonly %9, ptr nocapture readonly %10, ptr nocapture readonly %11, ptr nocapture readonly %12, ptr nocapture readonly %13, ptr nocapture readonly %14, ptr nocapture readonly %15, ptr nocapture readonly %16, ptr nocapture readonly %17, ptr nocapture readonly %18, ptr nocapture readonly %19, ptr nocapture readonly %20, ptr nocapture readonly %21, ptr nocapture readonly %22, ptr nocapture readonly %23, ptr nocapture readonly %24, ptr nocapture readonly %25, ptr nocapture readonly %26, ptr nocapture readonly %27, ptr nocapture readonly %28, ptr nocapture readonly %29, ptr nocapture readonly %30, ptr nocapture readonly %31, ptr nocapture readonly %32, ptr nocapture readonly %33, ptr nocapture readonly %34, ptr nocapture readonly %35, ptr nocapture readonly %36) local_unnamed_addr #0 {
     %38 = icmp sgt i32 %4, 0
     br i1 %38, label %39, label %41
 
   39:                                               ; preds = %37
     %40 = zext i32 %4 to i64
-    %scevgep = getelementptr i32, i32* %2, i64 -1
-    %scevgep69 = bitcast i32* %scevgep to i8*
-    %scevgep70 = getelementptr i32, i32* %5, i64 -1
-    %scevgep7071 = bitcast i32* %scevgep70 to i8*
-    %scevgep72 = getelementptr i32, i32* %6, i64 -1
-    %scevgep7273 = bitcast i32* %scevgep72 to i8*
+    %scevgep = getelementptr i32, ptr %2, i64 -1
+    %scevgep69 = bitcast ptr %scevgep to ptr
+    %scevgep70 = getelementptr i32, ptr %5, i64 -1
+    %scevgep7071 = bitcast ptr %scevgep70 to ptr
+    %scevgep72 = getelementptr i32, ptr %6, i64 -1
+    %scevgep7273 = bitcast ptr %scevgep72 to ptr
     call void @llvm.set.loop.iterations.i64(i64 %40)
     br label %42
 
@@ -34,20 +34,20 @@
     %lsr.iv = phi i64 [ %lsr.iv.next, %65 ], [ 0, %39 ]
     %43 = phi i64 [ 0, %39 ], [ %163, %65 ]
     %44 = phi i32 [ 0, %39 ], [ %58, %65 ]
-    %45 = phi i8* [ %scevgep69, %39 ], [ %52, %65 ]
-    %46 = phi i8* [ %scevgep7071, %39 ], [ %50, %65 ]
-    %47 = phi i8* [ %scevgep7273, %39 ], [ %48, %65 ]
-    %48 = getelementptr i8, i8* %47, i64 4
-    %49 = bitcast i8* %48 to i32*
-    %50 = getelementptr i8, i8* %46, i64 4
-    %51 = bitcast i8* %50 to i32*
-    %52 = getelementptr i8, i8* %45, i64 4
-    %53 = bitcast i8* %52 to i32*
+    %45 = phi ptr [ %scevgep69, %39 ], [ %52, %65 ]
+    %46 = phi ptr [ %scevgep7071, %39 ], [ %50, %65 ]
+    %47 = phi ptr [ %scevgep7273, %39 ], [ %48, %65 ]
+    %48 = getelementptr i8, ptr %47, i64 4
+    %49 = bitcast ptr %48 to ptr
+    %50 = getelementptr i8, ptr %46, i64 4
+    %51 = bitcast ptr %50 to ptr
+    %52 = getelementptr i8, ptr %45, i64 4
+    %53 = bitcast ptr %52 to ptr
     %lsr68 = trunc i64 %43 to i32
     %54 = udiv i32 %lsr68, 30
     %55 = mul nuw nsw i32 %54, 30
     %56 = sub i32 %lsr68, %55
-    %57 = load i32, i32* %53, align 4, !tbaa !2
+    %57 = load i32, ptr %53, align 4, !tbaa !2
     %58 = add nsw i32 %57, %44
     switch i32 %0, label %64 [
       i32 1, label %59
@@ -68,165 +68,165 @@
 
   65:                                               ; preds = %64, %62, %59
     %66 = phi i32 [ %56, %64 ], [ %63, %62 ], [ %61, %59 ]
-    %67 = bitcast i32* %7 to i8*
-    %68 = bitcast i32* %8 to i8*
-    %69 = bitcast i32* %9 to i8*
-    %70 = bitcast i32* %10 to i8*
-    %71 = bitcast i32* %11 to i8*
-    %72 = bitcast i32* %12 to i8*
-    %73 = bitcast i32* %13 to i8*
-    %74 = bitcast i32* %14 to i8*
-    %75 = bitcast i32* %15 to i8*
-    %76 = bitcast i32* %16 to i8*
-    %77 = bitcast i32* %17 to i8*
-    %78 = bitcast i32* %18 to i8*
-    %79 = bitcast i32* %19 to i8*
-    %80 = bitcast i32* %20 to i8*
-    %81 = bitcast i32* %21 to i8*
-    %82 = bitcast i32* %22 to i8*
-    %83 = bitcast i32* %23 to i8*
-    %84 = bitcast i32* %24 to i8*
-    %85 = bitcast i32* %25 to i8*
-    %86 = bitcast i32* %26 to i8*
-    %87 = bitcast i32* %27 to i8*
-    %88 = bitcast i32* %28 to i8*
-    %89 = bitcast i32* %29 to i8*
-    %90 = bitcast i32* %30 to i8*
-    %91 = bitcast i32* %31 to i8*
-    %92 = bitcast i32* %32 to i8*
-    %93 = bitcast i32* %33 to i8*
-    %94 = bitcast i32* %34 to i8*
-    %95 = bitcast i32* %35 to i8*
-    %96 = bitcast i32* %36 to i8*
-    %97 = bitcast i32* %3 to i8*
+    %67 = bitcast ptr %7 to ptr
+    %68 = bitcast ptr %8 to ptr
+    %69 = bitcast ptr %9 to ptr
+    %70 = bitcast ptr %10 to ptr
+    %71 = bitcast ptr %11 to ptr
+    %72 = bitcast ptr %12 to ptr
+    %73 = bitcast ptr %13 to ptr
+    %74 = bitcast ptr %14 to ptr
+    %75 = bitcast ptr %15 to ptr
+    %76 = bitcast ptr %16 to ptr
+    %77 = bitcast ptr %17 to ptr
+    %78 = bitcast ptr %18 to ptr
+    %79 = bitcast ptr %19 to ptr
+    %80 = bitcast ptr %20 to ptr
+    %81 = bitcast ptr %21 to ptr
+    %82 = bitcast ptr %22 to ptr
+    %83 = bitcast ptr %23 to ptr
+    %84 = bitcast ptr %24 to ptr
+    %85 = bitcast ptr %25 to ptr
+    %86 = bitcast ptr %26 to ptr
+    %87 = bitcast ptr %27 to ptr
+    %88 = bitcast ptr %28 to ptr
+    %89 = bitcast ptr %29 to ptr
+    %90 = bitcast ptr %30 to ptr
+    %91 = bitcast ptr %31 to ptr
+    %92 = bitcast ptr %32 to ptr
+    %93 = bitcast ptr %33 to ptr
+    %94 = bitcast ptr %34 to ptr
+    %95 = bitcast ptr %35 to ptr
+    %96 = bitcast ptr %36 to ptr
+    %97 = bitcast ptr %3 to ptr
     %98 = add nsw i32 %66, %58
-    %99 = load i32, i32* %51, align 4, !tbaa !2
+    %99 = load i32, ptr %51, align 4, !tbaa !2
     %100 = add nsw i32 %98, %99
-    %101 = load i32, i32* %49, align 4, !tbaa !2
+    %101 = load i32, ptr %49, align 4, !tbaa !2
     %102 = add nsw i32 %100, %101
-    %uglygep60 = getelementptr i8, i8* %67, i64 %lsr.iv
-    %uglygep6061 = bitcast i8* %uglygep60 to i32*
-    %103 = load i32, i32* %uglygep6061, align 4, !tbaa !2
+    %uglygep60 = getelementptr i8, ptr %67, i64 %lsr.iv
+    %uglygep6061 = bitcast ptr %uglygep60 to ptr
+    %103 = load i32, ptr %uglygep6061, align 4, !tbaa !2
     %104 = add nsw i32 %102, %103
-    %uglygep58 = getelementptr i8, i8* %68, i64 %lsr.iv
-    %uglygep5859 = bitcast i8* %uglygep58 to i32*
-    %105 = load i32, i32* %uglygep5859, align 4, !tbaa !2
+    %uglygep58 = getelementptr i8, ptr %68, i64 %lsr.iv
+    %uglygep5859 = bitcast ptr %uglygep58 to ptr
+    %105 = load i32, ptr %uglygep5859, align 4, !tbaa !2
     %106 = add nsw i32 %104, %105
-    %uglygep56 = getelementptr i8, i8* %69, i64 %lsr.iv
-    %uglygep5657 = bitcast i8* %uglygep56 to i32*
-    %107 = load i32, i32* %uglygep5657, align 4, !tbaa !2
+    %uglygep56 = getelementptr i8, ptr %69, i64 %lsr.iv
+    %uglygep5657 = bitcast ptr %uglygep56 to ptr
+    %107 = load i32, ptr %uglygep5657, align 4, !tbaa !2
     %108 = add nsw i32 %106, %107
-    %uglygep54 = getelementptr i8, i8* %70, i64 %lsr.iv
-    %uglygep5455 = bitcast i8* %uglygep54 to i32*
-    %109 = load i32, i32* %uglygep5455, align 4, !tbaa !2
+    %uglygep54 = getelementptr i8, ptr %70, i64 %lsr.iv
+    %uglygep5455 = bitcast ptr %uglygep54 to ptr
+    %109 = load i32, ptr %uglygep5455, align 4, !tbaa !2
     %110 = add nsw i32 %108, %109
-    %uglygep52 = getelementptr i8, i8* %71, i64 %lsr.iv
-    %uglygep5253 = bitcast i8* %uglygep52 to i32*
-    %111 = load i32, i32* %uglygep5253, align 4, !tbaa !2
+    %uglygep52 = getelementptr i8, ptr %71, i64 %lsr.iv
+    %uglygep5253 = bitcast ptr %uglygep52 to ptr
+    %111 = load i32, ptr %uglygep5253, align 4, !tbaa !2
     %112 = add nsw i32 %110, %111
-    %uglygep50 = getelementptr i8, i8* %72, i64 %lsr.iv
-    %uglygep5051 = bitcast i8* %uglygep50 to i32*
-    %113 = load i32, i32* %uglygep5051, align 4, !tbaa !2
+    %uglygep50 = getelementptr i8, ptr %72, i64 %lsr.iv
+    %uglygep5051 = bitcast ptr %uglygep50 to ptr
+    %113 = load i32, ptr %uglygep5051, align 4, !tbaa !2
     %114 = add nsw i32 %112, %113
-    %uglygep48 = getelementptr i8, i8* %73, i64 %lsr.iv
-    %uglygep4849 = bitcast i8* %uglygep48 to i32*
-    %115 = load i32, i32* %uglygep4849, align 4, !tbaa !2
+    %uglygep48 = getelementptr i8, ptr %73, i64 %lsr.iv
+    %uglygep4849 = bitcast ptr %uglygep48 to ptr
+    %115 = load i32, ptr %uglygep4849, align 4, !tbaa !2
     %116 = add nsw i32 %114, %115
-    %uglygep46 = getelementptr i8, i8* %74, i64 %lsr.iv
-    %uglygep4647 = bitcast i8* %uglygep46 to i32*
-    %117 = load i32, i32* %uglygep4647, align 4, !tbaa !2
+    %uglygep46 = getelementptr i8, ptr %74, i64 %lsr.iv
+    %uglygep4647 = bitcast ptr %uglygep46 to ptr
+    %117 = load i32, ptr %uglygep4647, align 4, !tbaa !2
     %118 = add nsw i32 %116, %117
-    %uglygep44 = getelementptr i8, i8* %75, i64 %lsr.iv
-    %uglygep4445 = bitcast i8* %uglygep44 to i32*
-    %119 = load i32, i32* %uglygep4445, align 4, !tbaa !2
+    %uglygep44 = getelementptr i8, ptr %75, i64 %lsr.iv
+    %uglygep4445 = bitcast ptr %uglygep44 to ptr
+    %119 = load i32, ptr %uglygep4445, align 4, !tbaa !2
     %120 = add nsw i32 %118, %119
-    %uglygep42 = getelementptr i8, i8* %76, i64 %lsr.iv
-    %uglygep4243 = bitcast i8* %uglygep42 to i32*
-    %121 = load i32, i32* %uglygep4243, align 4, !tbaa !2
+    %uglygep42 = getelementptr i8, ptr %76, i64 %lsr.iv
+    %uglygep4243 = bitcast ptr %uglygep42 to ptr
+    %121 = load i32, ptr %uglygep4243, align 4, !tbaa !2
     %122 = add nsw i32 %120, %121
-    %uglygep40 = getelementptr i8, i8* %77, i64 %lsr.iv
-    %uglygep4041 = bitcast i8* %uglygep40 to i32*
-    %123 = load i32, i32* %uglygep4041, align 4, !tbaa !2
+    %uglygep40 = getelementptr i8, ptr %77, i64 %lsr.iv
+    %uglygep4041 = bitcast ptr %uglygep40 to ptr
+    %123 = load i32, ptr %uglygep4041, align 4, !tbaa !2
     %124 = add nsw i32 %122, %123
-    %uglygep38 = getelementptr i8, i8* %78, i64 %lsr.iv
-    %uglygep3839 = bitcast i8* %uglygep38 to i32*
-    %125 = load i32, i32* %uglygep3839, align 4, !tbaa !2
+    %uglygep38 = getelementptr i8, ptr %78, i64 %lsr.iv
+    %uglygep3839 = bitcast ptr %uglygep38 to ptr
+    %125 = load i32, ptr %uglygep3839, align 4, !tbaa !2
     %126 = add nsw i32 %124, %125
-    %uglygep36 = getelementptr i8, i8* %79, i64 %lsr.iv
-    %uglygep3637 = bitcast i8* %uglygep36 to i32*
-    %127 = load i32, i32* %uglygep3637, align 4, !tbaa !2
+    %uglygep36 = getelementptr i8, ptr %79, i64 %lsr.iv
+    %uglygep3637 = bitcast ptr %uglygep36 to ptr
+    %127 = load i32, ptr %uglygep3637, align 4, !tbaa !2
     %128 = add nsw i32 %126, %127
-    %uglygep34 = getelementptr i8, i8* %80, i64 %lsr.iv
-    %uglygep3435 = bitcast i8* %uglygep34 to i32*
-    %129 = load i32, i32* %uglygep3435, align 4, !tbaa !2
+    %uglygep34 = getelementptr i8, ptr %80, i64 %lsr.iv
+    %uglygep3435 = bitcast ptr %uglygep34 to ptr
+    %129 = load i32, ptr %uglygep3435, align 4, !tbaa !2
     %130 = add nsw i32 %128, %129
-    %uglygep32 = getelementptr i8, i8* %81, i64 %lsr.iv
-    %uglygep3233 = bitcast i8* %uglygep32 to i32*
-    %131 = load i32, i32* %uglygep3233, align 4, !tbaa !2
+    %uglygep32 = getelementptr i8, ptr %81, i64 %lsr.iv
+    %uglygep3233 = bitcast ptr %uglygep32 to ptr
+    %131 = load i32, ptr %uglygep3233, align 4, !tbaa !2
     %132 = add nsw i32 %130, %131
-    %uglygep30 = getelementptr i8, i8* %82, i64 %lsr.iv
-    %uglygep3031 = bitcast i8* %uglygep30 to i32*
-    %133 = load i32, i32* %uglygep3031, align 4, !tbaa !2
+    %uglygep30 = getelementptr i8, ptr %82, i64 %lsr.iv
+    %uglygep3031 = bitcast ptr %uglygep30 to ptr
+    %133 = load i32, ptr %uglygep3031, align 4, !tbaa !2
     %134 = add nsw i32 %132, %133
-    %uglygep28 = getelementptr i8, i8* %83, i64 %lsr.iv
-    %uglygep2829 = bitcast i8* %uglygep28 to i32*
-    %135 = load i32, i32* %uglygep2829, align 4, !tbaa !2
+    %uglygep28 = getelementptr i8, ptr %83, i64 %lsr.iv
+    %uglygep2829 = bitcast ptr %uglygep28 to ptr
+    %135 = load i32, ptr %uglygep2829, align 4, !tbaa !2
     %136 = add nsw i32 %134, %135
-    %uglygep26 = getelementptr i8, i8* %84, i64 %lsr.iv
-    %uglygep2627 = bitcast i8* %uglygep26 to i32*
-    %137 = load i32, i32* %uglygep2627, align 4, !tbaa !2
+    %uglygep26 = getelementptr i8, ptr %84, i64 %lsr.iv
+    %uglygep2627 = bitcast ptr %uglygep26 to ptr
+    %137 = load i32, ptr %uglygep2627, align 4, !tbaa !2
     %138 = add nsw i32 %136, %137
-    %uglygep24 = getelementptr i8, i8* %85, i64 %lsr.iv
-    %uglygep2425 = bitcast i8* %uglygep24 to i32*
-    %139 = load i32, i32* %uglygep2425, align 4, !tbaa !2
+    %uglygep24 = getelementptr i8, ptr %85, i64 %lsr.iv
+    %uglygep2425 = bitcast ptr %uglygep24 to ptr
+    %139 = load i32, ptr %uglygep2425, align 4, !tbaa !2
     %140 = add nsw i32 %138, %139
-    %uglygep22 = getelementptr i8, i8* %86, i64 %lsr.iv
-    %uglygep2223 = bitcast i8* %uglygep22 to i32*
-    %141 = load i32, i32* %uglygep2223, align 4, !tbaa !2
+    %uglygep22 = getelementptr i8, ptr %86, i64 %lsr.iv
+    %uglygep2223 = bitcast ptr %uglygep22 to ptr
+    %141 = load i32, ptr %uglygep2223, align 4, !tbaa !2
     %142 = add nsw i32 %140, %141
-    %uglygep20 = getelementptr i8, i8* %87, i64 %lsr.iv
-    %uglygep2021 = bitcast i8* %uglygep20 to i32*
-    %143 = load i32, i32* %uglygep2021, align 4, !tbaa !2
+    %uglygep20 = getelementptr i8, ptr %87, i64 %lsr.iv
+    %uglygep2021 = bitcast ptr %uglygep20 to ptr
+    %143 = load i32, ptr %uglygep2021, align 4, !tbaa !2
     %144 = add nsw i32 %142, %143
-    %uglygep18 = getelementptr i8, i8* %88, i64 %lsr.iv
-    %uglygep1819 = bitcast i8* %uglygep18 to i32*
-    %145 = load i32, i32* %uglygep1819, align 4, !tbaa !2
+    %uglygep18 = getelementptr i8, ptr %88, i64 %lsr.iv
+    %uglygep1819 = bitcast ptr %uglygep18 to ptr
+    %145 = load i32, ptr %uglygep1819, align 4, !tbaa !2
     %146 = add nsw i32 %144, %145
-    %uglygep16 = getelementptr i8, i8* %89, i64 %lsr.iv
-    %uglygep1617 = bitcast i8* %uglygep16 to i32*
-    %147 = load i32, i32* %uglygep1617, align 4, !tbaa !2
+    %uglygep16 = getelementptr i8, ptr %89, i64 %lsr.iv
+    %uglygep1617 = bitcast ptr %uglygep16 to ptr
+    %147 = load i32, ptr %uglygep1617, align 4, !tbaa !2
     %148 = add nsw i32 %146, %147
-    %uglygep14 = getelementptr i8, i8* %90, i64 %lsr.iv
-    %uglygep1415 = bitcast i8* %uglygep14 to i32*
-    %149 = load i32, i32* %uglygep1415, align 4, !tbaa !2
+    %uglygep14 = getelementptr i8, ptr %90, i64 %lsr.iv
+    %uglygep1415 = bitcast ptr %uglygep14 to ptr
+    %149 = load i32, ptr %uglygep1415, align 4, !tbaa !2
     %150 = add nsw i32 %148, %149
-    %uglygep12 = getelementptr i8, i8* %91, i64 %lsr.iv
-    %uglygep1213 = bitcast i8* %uglygep12 to i32*
-    %151 = load i32, i32* %uglygep1213, align 4, !tbaa !2
+    %uglygep12 = getelementptr i8, ptr %91, i64 %lsr.iv
+    %uglygep1213 = bitcast ptr %uglygep12 to ptr
+    %151 = load i32, ptr %uglygep1213, align 4, !tbaa !2
     %152 = add nsw i32 %150, %151
-    %uglygep10 = getelementptr i8, i8* %92, i64 %lsr.iv
-    %uglygep1011 = bitcast i8* %uglygep10 to i32*
-    %153 = load i32, i32* %uglygep1011, align 4, !tbaa !2
+    %uglygep10 = getelementptr i8, ptr %92, i64 %lsr.iv
+    %uglygep1011 = bitcast ptr %uglygep10 to ptr
+    %153 = load i32, ptr %uglygep1011, align 4, !tbaa !2
     %154 = add nsw i32 %152, %153
-    %uglygep8 = getelementptr i8, i8* %93, i64 %lsr.iv
-    %uglygep89 = bitcast i8* %uglygep8 to i32*
-    %155 = load i32, i32* %uglygep89, align 4, !tbaa !2
+    %uglygep8 = getelementptr i8, ptr %93, i64 %lsr.iv
+    %uglygep89 = bitcast ptr %uglygep8 to ptr
+    %155 = load i32, ptr %uglygep89, align 4, !tbaa !2
     %156 = add nsw i32 %154, %155
-    %uglygep6 = getelementptr i8, i8* %94, i64 %lsr.iv
-    %uglygep67 = bitcast i8* %uglygep6 to i32*
-    %157 = load i32, i32* %uglygep67, align 4, !tbaa !2
+    %uglygep6 = getelementptr i8, ptr %94, i64 %lsr.iv
+    %uglygep67 = bitcast ptr %uglygep6 to ptr
+    %157 = load i32, ptr %uglygep67, align 4, !tbaa !2
     %158 = add nsw i32 %156, %157
-    %uglygep4 = getelementptr i8, i8* %95, i64 %lsr.iv
-    %uglygep45 = bitcast i8* %uglygep4 to i32*
-    %159 = load i32, i32* %uglygep45, align 4, !tbaa !2
+    %uglygep4 = getelementptr i8, ptr %95, i64 %lsr.iv
+    %uglygep45 = bitcast ptr %uglygep4 to ptr
+    %159 = load i32, ptr %uglygep45, align 4, !tbaa !2
     %160 = add nsw i32 %158, %159
-    %uglygep2 = getelementptr i8, i8* %96, i64 %lsr.iv
-    %uglygep23 = bitcast i8* %uglygep2 to i32*
-    %161 = load i32, i32* %uglygep23, align 4, !tbaa !2
+    %uglygep2 = getelementptr i8, ptr %96, i64 %lsr.iv
+    %uglygep23 = bitcast ptr %uglygep2 to ptr
+    %161 = load i32, ptr %uglygep23, align 4, !tbaa !2
     %162 = add nsw i32 %160, %161
-    %uglygep = getelementptr i8, i8* %97, i64 %lsr.iv
-    %uglygep1 = bitcast i8* %uglygep to i32*
-    store i32 %162, i32* %uglygep1, align 4, !tbaa !2
+    %uglygep = getelementptr i8, ptr %97, i64 %lsr.iv
+    %uglygep1 = bitcast ptr %uglygep to ptr
+    store i32 %162, ptr %uglygep1, align 4, !tbaa !2
     %163 = add nuw nsw i64 %43, 1
     %lsr.iv.next = add nuw nsw i64 %lsr.iv, 4
     %164 = call i1 @llvm.loop.decrement.i64(i64 1)

diff --git a/llvm/test/CodeGen/PowerPC/sms-phi-1.ll b/llvm/test/CodeGen/PowerPC/sms-phi-1.ll
index 782edba77c0c5..516d54ba2fdbe 100644
--- a/llvm/test/CodeGen/PowerPC/sms-phi-1.ll
+++ b/llvm/test/CodeGen/PowerPC/sms-phi-1.ll
@@ -41,8 +41,8 @@ define void @main() nounwind #0 {
 ; CHECK-NEXT:    blr
   %1 = tail call i64 @strtol()
   %2 = trunc i64 %1 to i32
-  %3 = tail call noalias i8* @calloc()
-  %4 = bitcast i8* %3 to i32*
+  %3 = tail call noalias ptr @calloc()
+  %4 = bitcast ptr %3 to ptr
   %5 = zext i32 %2 to i64
   br label %6
 
@@ -50,8 +50,8 @@ define void @main() nounwind #0 {
   %7 = phi i64 [ %11, %6 ], [ 0, %0 ]
   %8 = trunc i64 %7 to i32
   %9 = mul nsw i32 %8, %8
-  %10 = getelementptr inbounds i32, i32* %4, i64 %7
-  store i32 %9, i32* %10, align 4
+  %10 = getelementptr inbounds i32, ptr %4, i64 %7
+  store i32 %9, ptr %10, align 4
   %11 = add nuw nsw i64 %7, 1
   %12 = icmp eq i64 %11, %5
   br i1 %12, label %13, label %6
@@ -60,5 +60,5 @@ define void @main() nounwind #0 {
   ret void
 }
 
-declare i8* @calloc() local_unnamed_addr
+declare ptr @calloc() local_unnamed_addr
 declare i64 @strtol() local_unnamed_addr

diff --git a/llvm/test/CodeGen/PowerPC/sms-phi-3.ll b/llvm/test/CodeGen/PowerPC/sms-phi-3.ll
index 4cd60c69da30e..628822edabf39 100644
--- a/llvm/test/CodeGen/PowerPC/sms-phi-3.ll
+++ b/llvm/test/CodeGen/PowerPC/sms-phi-3.ll
@@ -3,9 +3,9 @@
 ; RUN:       -mcpu=pwr9 --ppc-enable-pipeliner 2>&1 | FileCheck %s
 
 %0 = type { double, double, double, i32, i32 }
-declare i8* @malloc() local_unnamed_addr
+declare ptr @malloc() local_unnamed_addr
 
-define void @phi3(i32*) nounwind {
+define void @phi3(ptr) nounwind {
 ; CHECK-LABEL: phi3:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    mflr 0
@@ -58,10 +58,10 @@ define void @phi3(i32*) nounwind {
 ; CHECK-NEXT:    ld 29, -24(1) # 8-byte Folded Reload
 ; CHECK-NEXT:    mtlr 0
 ; CHECK-NEXT:    blr
-  %2 = tail call noalias i8* @malloc()
-  %3 = bitcast i8* %2 to %0**
-  %4 = tail call noalias i8* @malloc()
-  %5 = bitcast i8* %4 to %0*
+  %2 = tail call noalias ptr @malloc()
+  %3 = bitcast ptr %2 to ptr
+  %4 = tail call noalias ptr @malloc()
+  %5 = bitcast ptr %4 to ptr
   br label %6
 
 6:                                                ; preds = %6, %1
@@ -69,11 +69,11 @@ define void @phi3(i32*) nounwind {
   %8 = phi i32 [ %15, %6 ], [ 0, %1 ]
   %9 = phi i64 [ %17, %6 ], [ undef, %1 ]
   %10 = sext i32 %8 to i64
-  %11 = getelementptr inbounds %0, %0* %5, i64 %10
-  %12 = getelementptr inbounds %0*, %0** %3, i64 %7
-  store %0* %11, %0** %12, align 8
-  %13 = getelementptr inbounds i32, i32* %0, i64 %7
-  %14 = load i32, i32* %13, align 4
+  %11 = getelementptr inbounds %0, ptr %5, i64 %10
+  %12 = getelementptr inbounds ptr, ptr %3, i64 %7
+  store ptr %11, ptr %12, align 8
+  %13 = getelementptr inbounds i32, ptr %0, i64 %7
+  %14 = load i32, ptr %13, align 4
   %15 = add nsw i32 %14, %8
   %16 = add nuw nsw i64 %7, 1
   %17 = add i64 %9, -1

diff --git a/llvm/test/CodeGen/PowerPC/stack-coloring-vararg.mir b/llvm/test/CodeGen/PowerPC/stack-coloring-vararg.mir
index 5eceea7a5bf7f..b6871ccd5ba22 100644
--- a/llvm/test/CodeGen/PowerPC/stack-coloring-vararg.mir
+++ b/llvm/test/CodeGen/PowerPC/stack-coloring-vararg.mir
@@ -17,30 +17,30 @@
   target datalayout = "E-m:e-p:32:32-i64:64-n32"
   target triple = "powerpc-unknown-freebsd13.0"
 
-  %struct.__va_list_tag = type { i8, i8, i16, i8*, i8* }
+  %struct.__va_list_tag = type { i8, i8, i16, ptr, ptr }
   ; Function Attrs: argmemonly nounwind willreturn
-  declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture) #0
-  define dso_local void @atf_tc_fail_nonfatal(i8* %fmt, ...) !dbg !3 {
+  declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #0
+  define dso_local void @atf_tc_fail_nonfatal(ptr %fmt, ...) !dbg !3 {
   entry:
     %buf.i.i = alloca [1024 x i8], align 1
     %ap2.i.i = alloca [1 x %struct.__va_list_tag], align 4
     br i1 undef, label %format_reason_ap.exit.i, label %if.then6.i.i
 
   if.then6.i.i:                                     ; preds = %entry
-    %0 = bitcast [1 x %struct.__va_list_tag]* %ap2.i.i to i8*
-    call void @llvm.lifetime.start.p0i8(i64 12, i8* nonnull %0)
-    call void @llvm.va_copy(i8* nonnull %0, i8* nonnull null)
+    %0 = bitcast ptr %ap2.i.i to ptr
+    call void @llvm.lifetime.start.p0(i64 12, ptr nonnull %0)
+    call void @llvm.va_copy(ptr nonnull %0, ptr nonnull null)
     ret void
 
   format_reason_ap.exit.i:                          ; preds = %entry
-    %1 = bitcast [1024 x i8]* %buf.i.i to i8*
-    call void @llvm.lifetime.start.p0i8(i64 1024, i8* nonnull %1)
-    call void @fprintf(i8* nonnull %1)
+    %1 = bitcast ptr %buf.i.i to ptr
+    call void @llvm.lifetime.start.p0(i64 1024, ptr nonnull %1)
+    call void @fprintf(ptr nonnull %1)
     ret void
   }
-  declare void @fprintf(i8*)
+  declare void @fprintf(ptr)
   ; Function Attrs: nounwind
-  declare void @llvm.va_copy(i8*, i8*) #1
+  declare void @llvm.va_copy(ptr, ptr) #1
 
   attributes #0 = { argmemonly nounwind willreturn }
   attributes #1 = { nounwind }

diff --git a/llvm/test/CodeGen/PowerPC/toc-load-sched-bug.ll b/llvm/test/CodeGen/PowerPC/toc-load-sched-bug.ll
index 61e0a86340cc8..5cf354f6a453c 100644
--- a/llvm/test/CodeGen/PowerPC/toc-load-sched-bug.ll
+++ b/llvm/test/CodeGen/PowerPC/toc-load-sched-bug.ll
@@ -8,77 +8,77 @@ target triple = "powerpc64le-unknown-linux-gnu"
 ; and the usual stack-adjust instructions that held the TOC restore in
 ; place were optimized away.
 
-%"class.llvm::Module" = type { %"class.llvm::LLVMContext"*, %"class.llvm::iplist", %"class.llvm::iplist.0", %"class.llvm::iplist.9", %"struct.llvm::ilist", %"class.std::basic_string", %"class.llvm::ValueSymbolTable"*, %"class.llvm::StringMap", %"class.std::unique_ptr", %"class.std::basic_string", %"class.std::basic_string", i8*, %"class.llvm::RandomNumberGenerator"*, %"class.std::basic_string", %"class.llvm::DataLayout" }
-%"class.llvm::iplist" = type { %"struct.llvm::ilist_traits", %"class.llvm::GlobalVariable"* }
+%"class.llvm::Module" = type { ptr, %"class.llvm::iplist", %"class.llvm::iplist.0", %"class.llvm::iplist.9", %"struct.llvm::ilist", %"class.std::basic_string", ptr, %"class.llvm::StringMap", %"class.std::unique_ptr", %"class.std::basic_string", %"class.std::basic_string", ptr, ptr, %"class.std::basic_string", %"class.llvm::DataLayout" }
+%"class.llvm::iplist" = type { %"struct.llvm::ilist_traits", ptr }
 %"struct.llvm::ilist_traits" = type { %"class.llvm::ilist_node" }
-%"class.llvm::ilist_node" = type { %"class.llvm::ilist_half_node", %"class.llvm::GlobalVariable"* }
-%"class.llvm::ilist_half_node" = type { %"class.llvm::GlobalVariable"* }
+%"class.llvm::ilist_node" = type { %"class.llvm::ilist_half_node", ptr }
+%"class.llvm::ilist_half_node" = type { ptr }
 %"class.llvm::GlobalVariable" = type { %"class.llvm::GlobalObject", %"class.llvm::ilist_node", i8 }
-%"class.llvm::GlobalObject" = type { %"class.llvm::GlobalValue", %"class.std::basic_string", %"class.llvm::Comdat"* }
-%"class.llvm::GlobalValue" = type { %"class.llvm::Constant", i32, %"class.llvm::Module"* }
+%"class.llvm::GlobalObject" = type { %"class.llvm::GlobalValue", %"class.std::basic_string", ptr }
+%"class.llvm::GlobalValue" = type { %"class.llvm::Constant", i32, ptr }
 %"class.llvm::Constant" = type { %"class.llvm::User" }
-%"class.llvm::User" = type { %"class.llvm::Value.base", i32, %"class.llvm::Use"* }
-%"class.llvm::Value.base" = type <{ i32 (...)**, %"class.llvm::Type"*, %"class.llvm::Use"*, %"class.llvm::StringMapEntry"*, i8, i8, i16 }>
-%"class.llvm::Type" = type { %"class.llvm::LLVMContext"*, i32, i32, %"class.llvm::Type"** }
+%"class.llvm::User" = type { %"class.llvm::Value.base", i32, ptr }
+%"class.llvm::Value.base" = type <{ ptr, ptr, ptr, ptr, i8, i8, i16 }>
+%"class.llvm::Type" = type { ptr, i32, i32, ptr }
 %"class.llvm::StringMapEntry" = type opaque
-%"class.llvm::Use" = type { %"class.llvm::Value"*, %"class.llvm::Use"*, %"class.llvm::PointerIntPair" }
-%"class.llvm::Value" = type { i32 (...)**, %"class.llvm::Type"*, %"class.llvm::Use"*, %"class.llvm::StringMapEntry"*, i8, i8, i16 }
+%"class.llvm::Use" = type { ptr, ptr, %"class.llvm::PointerIntPair" }
+%"class.llvm::Value" = type { ptr, ptr, ptr, ptr, i8, i8, i16 }
 %"class.llvm::PointerIntPair" = type { i64 }
-%"class.llvm::Comdat" = type { %"class.llvm::StringMapEntry.43"*, i32 }
+%"class.llvm::Comdat" = type { ptr, i32 }
 %"class.llvm::StringMapEntry.43" = type opaque
-%"class.llvm::iplist.0" = type { %"struct.llvm::ilist_traits.1", %"class.llvm::Function"* }
+%"class.llvm::iplist.0" = type { %"struct.llvm::ilist_traits.1", ptr }
 %"struct.llvm::ilist_traits.1" = type { %"class.llvm::ilist_node.7" }
-%"class.llvm::ilist_node.7" = type { %"class.llvm::ilist_half_node.8", %"class.llvm::Function"* }
-%"class.llvm::ilist_half_node.8" = type { %"class.llvm::Function"* }
-%"class.llvm::Function" = type { %"class.llvm::GlobalObject", %"class.llvm::ilist_node.7", %"class.llvm::iplist.44", %"class.llvm::iplist.52", %"class.llvm::ValueSymbolTable"*, %"class.llvm::AttributeSet" }
-%"class.llvm::iplist.44" = type { %"struct.llvm::ilist_traits.45", %"class.llvm::BasicBlock"* }
+%"class.llvm::ilist_node.7" = type { %"class.llvm::ilist_half_node.8", ptr }
+%"class.llvm::ilist_half_node.8" = type { ptr }
+%"class.llvm::Function" = type { %"class.llvm::GlobalObject", %"class.llvm::ilist_node.7", %"class.llvm::iplist.44", %"class.llvm::iplist.52", ptr, %"class.llvm::AttributeSet" }
+%"class.llvm::iplist.44" = type { %"struct.llvm::ilist_traits.45", ptr }
 %"struct.llvm::ilist_traits.45" = type { %"class.llvm::ilist_half_node.51" }
-%"class.llvm::ilist_half_node.51" = type { %"class.llvm::BasicBlock"* }
-%"class.llvm::BasicBlock" = type { %"class.llvm::Value.base", %"class.llvm::ilist_node.61", %"class.llvm::iplist.62", %"class.llvm::Function"* }
-%"class.llvm::ilist_node.61" = type { %"class.llvm::ilist_half_node.51", %"class.llvm::BasicBlock"* }
-%"class.llvm::iplist.62" = type { %"struct.llvm::ilist_traits.63", %"class.llvm::Instruction"* }
+%"class.llvm::ilist_half_node.51" = type { ptr }
+%"class.llvm::BasicBlock" = type { %"class.llvm::Value.base", %"class.llvm::ilist_node.61", %"class.llvm::iplist.62", ptr }
+%"class.llvm::ilist_node.61" = type { %"class.llvm::ilist_half_node.51", ptr }
+%"class.llvm::iplist.62" = type { %"struct.llvm::ilist_traits.63", ptr }
 %"struct.llvm::ilist_traits.63" = type { %"class.llvm::ilist_half_node.69" }
-%"class.llvm::ilist_half_node.69" = type { %"class.llvm::Instruction"* }
-%"class.llvm::Instruction" = type { %"class.llvm::User", %"class.llvm::ilist_node.70", %"class.llvm::BasicBlock"*, %"class.llvm::DebugLoc" }
-%"class.llvm::ilist_node.70" = type { %"class.llvm::ilist_half_node.69", %"class.llvm::Instruction"* }
+%"class.llvm::ilist_half_node.69" = type { ptr }
+%"class.llvm::Instruction" = type { %"class.llvm::User", %"class.llvm::ilist_node.70", ptr, %"class.llvm::DebugLoc" }
+%"class.llvm::ilist_node.70" = type { %"class.llvm::ilist_half_node.69", ptr }
 %"class.llvm::DebugLoc" = type { i32, i32 }
-%"class.llvm::iplist.52" = type { %"struct.llvm::ilist_traits.53", %"class.llvm::Argument"* }
+%"class.llvm::iplist.52" = type { %"struct.llvm::ilist_traits.53", ptr }
 %"struct.llvm::ilist_traits.53" = type { %"class.llvm::ilist_half_node.59" }
-%"class.llvm::ilist_half_node.59" = type { %"class.llvm::Argument"* }
-%"class.llvm::Argument" = type { %"class.llvm::Value.base", %"class.llvm::ilist_node.60", %"class.llvm::Function"* }
-%"class.llvm::ilist_node.60" = type { %"class.llvm::ilist_half_node.59", %"class.llvm::Argument"* }
-%"class.llvm::AttributeSet" = type { %"class.llvm::AttributeSetImpl"* }
+%"class.llvm::ilist_half_node.59" = type { ptr }
+%"class.llvm::Argument" = type { %"class.llvm::Value.base", %"class.llvm::ilist_node.60", ptr }
+%"class.llvm::ilist_node.60" = type { %"class.llvm::ilist_half_node.59", ptr }
+%"class.llvm::AttributeSet" = type { ptr }
 %"class.llvm::AttributeSetImpl" = type opaque
-%"class.llvm::iplist.9" = type { %"struct.llvm::ilist_traits.10", %"class.llvm::GlobalAlias"* }
+%"class.llvm::iplist.9" = type { %"struct.llvm::ilist_traits.10", ptr }
 %"struct.llvm::ilist_traits.10" = type { %"class.llvm::ilist_node.16" }
-%"class.llvm::ilist_node.16" = type { %"class.llvm::ilist_half_node.17", %"class.llvm::GlobalAlias"* }
-%"class.llvm::ilist_half_node.17" = type { %"class.llvm::GlobalAlias"* }
+%"class.llvm::ilist_node.16" = type { %"class.llvm::ilist_half_node.17", ptr }
+%"class.llvm::ilist_half_node.17" = type { ptr }
 %"class.llvm::GlobalAlias" = type { %"class.llvm::GlobalValue", %"class.llvm::ilist_node.16" }
 %"struct.llvm::ilist" = type { %"class.llvm::iplist.18" }
-%"class.llvm::iplist.18" = type { %"struct.llvm::ilist_traits.19", %"class.llvm::NamedMDNode"* }
+%"class.llvm::iplist.18" = type { %"struct.llvm::ilist_traits.19", ptr }
 %"struct.llvm::ilist_traits.19" = type { %"class.llvm::ilist_node.24" }
-%"class.llvm::ilist_node.24" = type { %"class.llvm::ilist_half_node.25", %"class.llvm::NamedMDNode"* }
-%"class.llvm::ilist_half_node.25" = type { %"class.llvm::NamedMDNode"* }
-%"class.llvm::NamedMDNode" = type { %"class.llvm::ilist_node.24", %"class.std::basic_string", %"class.llvm::Module"*, i8* }
+%"class.llvm::ilist_node.24" = type { %"class.llvm::ilist_half_node.25", ptr }
+%"class.llvm::ilist_half_node.25" = type { ptr }
+%"class.llvm::NamedMDNode" = type { %"class.llvm::ilist_node.24", %"class.std::basic_string", ptr, ptr }
 %"class.llvm::ValueSymbolTable" = type opaque
 %"class.llvm::StringMap" = type { %"class.llvm::StringMapImpl", %"class.llvm::MallocAllocator" }
-%"class.llvm::StringMapImpl" = type { %"class.llvm::StringMapEntryBase"**, i32, i32, i32, i32 }
+%"class.llvm::StringMapImpl" = type { ptr, i32, i32, i32, i32 }
 %"class.llvm::StringMapEntryBase" = type { i32 }
 %"class.llvm::MallocAllocator" = type { i8 }
 %"class.std::unique_ptr" = type { %"class.std::tuple" }
 %"class.std::tuple" = type { %"struct.std::_Tuple_impl" }
 %"struct.std::_Tuple_impl" = type { %"struct.std::_Head_base.28" }
-%"struct.std::_Head_base.28" = type { %"class.llvm::GVMaterializer"* }
+%"struct.std::_Head_base.28" = type { ptr }
 %"class.llvm::GVMaterializer" = type opaque
 %"class.llvm::RandomNumberGenerator" = type opaque
 %"class.std::basic_string" = type { %"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Alloc_hider" }
-%"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Alloc_hider" = type { i8* }
-%"class.llvm::DataLayout" = type { i8, i32, i32, [4 x i8], %"class.llvm::SmallVector", %"class.llvm::SmallVector.29", %"class.llvm::SmallVector.36", i8* }
+%"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Alloc_hider" = type { ptr }
+%"class.llvm::DataLayout" = type { i8, i32, i32, [4 x i8], %"class.llvm::SmallVector", %"class.llvm::SmallVector.29", %"class.llvm::SmallVector.36", ptr }
 %"class.llvm::SmallVector" = type { %"class.llvm::SmallVectorImpl.base", %"struct.llvm::SmallVectorStorage" }
 %"class.llvm::SmallVectorImpl.base" = type { %"class.llvm::SmallVectorTemplateBase.base" }
 %"class.llvm::SmallVectorTemplateBase.base" = type { %"class.llvm::SmallVectorTemplateCommon.base" }
 %"class.llvm::SmallVectorTemplateCommon.base" = type <{ %"class.llvm::SmallVectorBase", %"struct.llvm::AlignedCharArrayUnion" }>
-%"class.llvm::SmallVectorBase" = type { i8*, i8*, i8* }
+%"class.llvm::SmallVectorBase" = type { ptr, ptr, ptr }
 %"struct.llvm::AlignedCharArrayUnion" = type { %"struct.llvm::AlignedCharArray" }
 %"struct.llvm::AlignedCharArray" = type { [1 x i8] }
 %"struct.llvm::SmallVectorStorage" = type { [7 x %"struct.llvm::AlignedCharArrayUnion"] }
@@ -96,20 +96,20 @@ target triple = "powerpc64le-unknown-linux-gnu"
 %"struct.llvm::AlignedCharArrayUnion.40" = type { %"struct.llvm::AlignedCharArray.41" }
 %"struct.llvm::AlignedCharArray.41" = type { [16 x i8] }
 %"struct.llvm::SmallVectorStorage.42" = type { [7 x %"struct.llvm::AlignedCharArrayUnion.40"] }
-%"class.llvm::SMDiagnostic" = type { %"class.llvm::SourceMgr"*, %"class.llvm::SMLoc", %"class.std::basic_string", i32, i32, i32, %"class.std::basic_string", %"class.std::basic_string", %"class.std::vector.79", %"class.llvm::SmallVector.84" }
-%"class.llvm::SourceMgr" = type { %"class.std::vector", %"class.std::vector.74", i8*, void (%"class.llvm::SMDiagnostic"*, i8*)*, i8* }
+%"class.llvm::SMDiagnostic" = type { ptr, %"class.llvm::SMLoc", %"class.std::basic_string", i32, i32, i32, %"class.std::basic_string", %"class.std::basic_string", %"class.std::vector.79", %"class.llvm::SmallVector.84" }
+%"class.llvm::SourceMgr" = type { %"class.std::vector", %"class.std::vector.74", ptr, ptr, ptr }
 %"class.std::vector" = type { %"struct.std::_Vector_base" }
 %"struct.std::_Vector_base" = type { %"struct.std::_Vector_base<llvm::SourceMgr::SrcBuffer, std::allocator<llvm::SourceMgr::SrcBuffer> >::_Vector_impl" }
-%"struct.std::_Vector_base<llvm::SourceMgr::SrcBuffer, std::allocator<llvm::SourceMgr::SrcBuffer> >::_Vector_impl" = type { %"struct.llvm::SourceMgr::SrcBuffer"*, %"struct.llvm::SourceMgr::SrcBuffer"*, %"struct.llvm::SourceMgr::SrcBuffer"* }
-%"struct.llvm::SourceMgr::SrcBuffer" = type { %"class.llvm::MemoryBuffer"*, %"class.llvm::SMLoc" }
-%"class.llvm::MemoryBuffer" = type { i32 (...)**, i8*, i8* }
+%"struct.std::_Vector_base<llvm::SourceMgr::SrcBuffer, std::allocator<llvm::SourceMgr::SrcBuffer> >::_Vector_impl" = type { ptr, ptr, ptr }
+%"struct.llvm::SourceMgr::SrcBuffer" = type { ptr, %"class.llvm::SMLoc" }
+%"class.llvm::MemoryBuffer" = type { ptr, ptr, ptr }
 %"class.std::vector.74" = type { %"struct.std::_Vector_base.75" }
 %"struct.std::_Vector_base.75" = type { %"struct.std::_Vector_base<std::basic_string<char>, std::allocator<std::basic_string<char> > >::_Vector_impl" }
-%"struct.std::_Vector_base<std::basic_string<char>, std::allocator<std::basic_string<char> > >::_Vector_impl" = type { %"class.std::basic_string"*, %"class.std::basic_string"*, %"class.std::basic_string"* }
-%"class.llvm::SMLoc" = type { i8* }
+%"struct.std::_Vector_base<std::basic_string<char>, std::allocator<std::basic_string<char> > >::_Vector_impl" = type { ptr, ptr, ptr }
+%"class.llvm::SMLoc" = type { ptr }
 %"class.std::vector.79" = type { %"struct.std::_Vector_base.80" }
 %"struct.std::_Vector_base.80" = type { %"struct.std::_Vector_base<std::pair<unsigned int, unsigned int>, std::allocator<std::pair<unsigned int, unsigned int> > >::_Vector_impl" }
-%"struct.std::_Vector_base<std::pair<unsigned int, unsigned int>, std::allocator<std::pair<unsigned int, unsigned int> > >::_Vector_impl" = type { %"struct.std::pair"*, %"struct.std::pair"*, %"struct.std::pair"* }
+%"struct.std::_Vector_base<std::pair<unsigned int, unsigned int>, std::allocator<std::pair<unsigned int, unsigned int> > >::_Vector_impl" = type { ptr, ptr, ptr }
 %"struct.std::pair" = type { i32, i32 }
 %"class.llvm::SmallVector.84" = type { %"class.llvm::SmallVectorImpl.85", %"struct.llvm::SmallVectorStorage.90" }
 %"class.llvm::SmallVectorImpl.85" = type { %"class.llvm::SmallVectorTemplateBase.86" }
@@ -118,7 +118,7 @@ target triple = "powerpc64le-unknown-linux-gnu"
 %"struct.llvm::AlignedCharArrayUnion.88" = type { %"struct.llvm::AlignedCharArray.89" }
 %"struct.llvm::AlignedCharArray.89" = type { [24 x i8] }
 %"struct.llvm::SmallVectorStorage.90" = type { [3 x %"struct.llvm::AlignedCharArrayUnion.88"] }
-%"class.llvm::LLVMContext" = type { %"class.llvm::LLVMContextImpl"* }
+%"class.llvm::LLVMContext" = type { ptr }
 %"class.llvm::LLVMContextImpl" = type opaque
 %"class.std::allocator" = type { i8 }
 %"class.llvm::ErrorOr.109" = type { %union.anon.110, i8, [7 x i8] }
@@ -127,26 +127,26 @@ target triple = "powerpc64le-unknown-linux-gnu"
 %"struct.llvm::AlignedCharArray.94" = type { [16 x i8] }
 %"class.llvm::ErrorOr" = type { %union.anon, i8, [7 x i8] }
 %union.anon = type { %"struct.llvm::AlignedCharArrayUnion.93" }
-%"class.std::error_category" = type { i32 (...)** }
+%"class.std::error_category" = type { ptr }
 %"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Rep" = type { %"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Rep_base" }
 %"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Rep_base" = type { i64, i64, i32 }
 %"class.llvm::SMFixIt" = type { %"class.llvm::SMRange", %"class.std::basic_string" }
 %"class.llvm::SMRange" = type { %"class.llvm::SMLoc", %"class.llvm::SMLoc" }
 %"struct.llvm::NamedRegionTimer" = type { %"class.llvm::TimeRegion" }
-%"class.llvm::TimeRegion" = type { %"class.llvm::Timer"* }
-%"class.llvm::Timer" = type { %"class.llvm::TimeRecord", %"class.std::basic_string", i8, %"class.llvm::TimerGroup"*, %"class.llvm::Timer"**, %"class.llvm::Timer"* }
+%"class.llvm::TimeRegion" = type { ptr }
+%"class.llvm::Timer" = type { %"class.llvm::TimeRecord", %"class.std::basic_string", i8, ptr, ptr, ptr }
 %"class.llvm::TimeRecord" = type { double, double, double, i64 }
-%"class.llvm::TimerGroup" = type { %"class.std::basic_string", %"class.llvm::Timer"*, %"class.std::vector.103", %"class.llvm::TimerGroup"**, %"class.llvm::TimerGroup"* }
+%"class.llvm::TimerGroup" = type { %"class.std::basic_string", ptr, %"class.std::vector.103", ptr, ptr }
 %"class.std::vector.103" = type { %"struct.std::_Vector_base.104" }
 %"struct.std::_Vector_base.104" = type { %"struct.std::_Vector_base<std::pair<llvm::TimeRecord, std::basic_string<char> >, std::allocator<std::pair<llvm::TimeRecord, std::basic_string<char> > > >::_Vector_impl" }
-%"struct.std::_Vector_base<std::pair<llvm::TimeRecord, std::basic_string<char> >, std::allocator<std::pair<llvm::TimeRecord, std::basic_string<char> > > >::_Vector_impl" = type { %"struct.std::pair.108"*, %"struct.std::pair.108"*, %"struct.std::pair.108"* }
+%"struct.std::_Vector_base<std::pair<llvm::TimeRecord, std::basic_string<char> >, std::allocator<std::pair<llvm::TimeRecord, std::basic_string<char> > > >::_Vector_impl" = type { ptr, ptr, ptr }
 %"struct.std::pair.108" = type opaque
 %struct.LLVMOpaqueContext = type opaque
 %struct.LLVMOpaqueMemoryBuffer = type opaque
 %struct.LLVMOpaqueModule = type opaque
-%"class.llvm::raw_string_ostream" = type { %"class.llvm::raw_ostream.base", %"class.std::basic_string"* }
-%"class.llvm::raw_ostream.base" = type <{ i32 (...)**, i8*, i8*, i8*, i32 }>
-%"class.llvm::raw_ostream" = type { i32 (...)**, i8*, i8*, i8*, i32 }
+%"class.llvm::raw_string_ostream" = type { %"class.llvm::raw_ostream.base", ptr }
+%"class.llvm::raw_ostream.base" = type <{ ptr, ptr, ptr, ptr, i32 }>
+%"class.llvm::raw_ostream" = type { ptr, ptr, ptr, ptr, i32 }
 
 @.str = private unnamed_addr constant [28 x i8] c"Could not open input file: \00", align 1
 @.str1 = private unnamed_addr constant [54 x i8] c"!HasError && \22Cannot get value when an error exists!\22\00", align 1
@@ -154,12 +154,12 @@ target triple = "powerpc64le-unknown-linux-gnu"
 @__PRETTY_FUNCTION__._ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE10getStorageEv = private unnamed_addr constant [206 x i8] c"storage_type *llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer, std::default_delete<llvm::MemoryBuffer> > >::getStorage() [T = std::unique_ptr<llvm::MemoryBuffer, std::default_delete<llvm::MemoryBuffer> >]\00", align 1
 @_ZNSs4_Rep20_S_empty_rep_storageE = external global [0 x i64]
 
-declare void @_ZN4llvm12MemoryBuffer14getFileOrSTDINENS_9StringRefEl(%"class.llvm::ErrorOr"* sret(%"class.llvm::ErrorOr"), [2 x i64], i64) #1
+declare void @_ZN4llvm12MemoryBuffer14getFileOrSTDINENS_9StringRefEl(ptr sret(%"class.llvm::ErrorOr"), [2 x i64], i64) #1
 
-declare void @_ZN4llvm16NamedRegionTimerC1ENS_9StringRefES1_b(%"struct.llvm::NamedRegionTimer"*, [2 x i64], [2 x i64], i1 zeroext) #1
+declare void @_ZN4llvm16NamedRegionTimerC1ENS_9StringRefES1_b(ptr, [2 x i64], [2 x i64], i1 zeroext) #1
 
 ; Function Attrs: nounwind
-define %"class.llvm::Module"* @_ZN4llvm11ParseIRFileERKSsRNS_12SMDiagnosticERNS_11LLVMContextE(%"class.std::basic_string"* nocapture readonly dereferenceable(8) %Filename, %"class.llvm::SMDiagnostic"* dereferenceable(200) %Err, %"class.llvm::LLVMContext"* dereferenceable(8) %Context) #0 {
+define ptr @_ZN4llvm11ParseIRFileERKSsRNS_12SMDiagnosticERNS_11LLVMContextE(ptr nocapture readonly dereferenceable(8) %Filename, ptr dereferenceable(200) %Err, ptr dereferenceable(8) %Context) #0 {
 entry:
 ; CHECK: .globl	_ZN4llvm11ParseIRFileERKSsRNS_12SMDiagnosticERNS_11LLVMContextE
 ; CHECK: bctrl
@@ -176,173 +176,173 @@ entry:
   %FileOrErr = alloca %"class.llvm::ErrorOr", align 8
   %ref.tmp = alloca %"class.llvm::SMDiagnostic", align 8
   %ref.tmp5 = alloca %"class.std::basic_string", align 8
-  %_M_p.i.i.i = getelementptr inbounds %"class.std::basic_string", %"class.std::basic_string"* %Filename, i64 0, i32 0, i32 0
-  %0 = load i8*, i8** %_M_p.i.i.i, align 8, !tbaa !1
-  %1 = ptrtoint i8* %0 to i64
-  %arrayidx.i.i.i = getelementptr inbounds i8, i8* %0, i64 -24
-  %_M_length.i.i = bitcast i8* %arrayidx.i.i.i to i64*
-  %2 = load i64, i64* %_M_length.i.i, align 8, !tbaa !7
+  %_M_p.i.i.i = getelementptr inbounds %"class.std::basic_string", ptr %Filename, i64 0, i32 0, i32 0
+  %0 = load ptr, ptr %_M_p.i.i.i, align 8, !tbaa !1
+  %1 = ptrtoint ptr %0 to i64
+  %arrayidx.i.i.i = getelementptr inbounds i8, ptr %0, i64 -24
+  %_M_length.i.i = bitcast ptr %arrayidx.i.i.i to ptr
+  %2 = load i64, ptr %_M_length.i.i, align 8, !tbaa !7
   %.fca.0.insert18 = insertvalue [2 x i64] undef, i64 %1, 0
   %.fca.1.insert21 = insertvalue [2 x i64] %.fca.0.insert18, i64 %2, 1
-  call void @_ZN4llvm12MemoryBuffer14getFileOrSTDINENS_9StringRefEl(%"class.llvm::ErrorOr"* sret(%"class.llvm::ErrorOr") %FileOrErr, [2 x i64] %.fca.1.insert21, i64 -1) #3
-  %HasError.i24 = getelementptr inbounds %"class.llvm::ErrorOr", %"class.llvm::ErrorOr"* %FileOrErr, i64 0, i32 1
-  %bf.load.i25 = load i8, i8* %HasError.i24, align 8
+  call void @_ZN4llvm12MemoryBuffer14getFileOrSTDINENS_9StringRefEl(ptr sret(%"class.llvm::ErrorOr") %FileOrErr, [2 x i64] %.fca.1.insert21, i64 -1) #3
+  %HasError.i24 = getelementptr inbounds %"class.llvm::ErrorOr", ptr %FileOrErr, i64 0, i32 1
+  %bf.load.i25 = load i8, ptr %HasError.i24, align 8
   %3 = and i8 %bf.load.i25, 1
   %bf.cast.i26 = icmp eq i8 %3, 0
   br i1 %bf.cast.i26, label %_ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE3getEv.exit, label %_ZNK4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE8getErrorEv.exit
 
 _ZNK4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE8getErrorEv.exit: ; preds = %entry
-  %retval.sroa.0.0..sroa_cast.i = bitcast %"class.llvm::ErrorOr"* %FileOrErr to i64*
-  %retval.sroa.0.0.copyload.i = load i64, i64* %retval.sroa.0.0..sroa_cast.i, align 8
-  %retval.sroa.3.0..sroa_idx.i = getelementptr inbounds %"class.llvm::ErrorOr", %"class.llvm::ErrorOr"* %FileOrErr, i64 0, i32 0, i32 0, i32 0, i32 0, i64 8
-  %retval.sroa.3.0..sroa_cast.i = bitcast i8* %retval.sroa.3.0..sroa_idx.i to i64*
-  %retval.sroa.3.0.copyload.i = load i64, i64* %retval.sroa.3.0..sroa_cast.i, align 8
+  %retval.sroa.0.0..sroa_cast.i = bitcast ptr %FileOrErr to ptr
+  %retval.sroa.0.0.copyload.i = load i64, ptr %retval.sroa.0.0..sroa_cast.i, align 8
+  %retval.sroa.3.0..sroa_idx.i = getelementptr inbounds %"class.llvm::ErrorOr", ptr %FileOrErr, i64 0, i32 0, i32 0, i32 0, i32 0, i64 8
+  %retval.sroa.3.0..sroa_cast.i = bitcast ptr %retval.sroa.3.0..sroa_idx.i to ptr
+  %retval.sroa.3.0.copyload.i = load i64, ptr %retval.sroa.3.0..sroa_cast.i, align 8
   %phitmp = trunc i64 %retval.sroa.0.0.copyload.i to i32
   %cmp.i = icmp eq i32 %phitmp, 0
   br i1 %cmp.i, label %cond.false.i.i, label %if.then
 
 if.then:                                          ; preds = %_ZNK4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE8getErrorEv.exit
-  %.c = inttoptr i64 %retval.sroa.3.0.copyload.i to %"class.std::error_category"*
-  %4 = load i8*, i8** %_M_p.i.i.i, align 8, !tbaa !1
-  %arrayidx.i.i.i30 = getelementptr inbounds i8, i8* %4, i64 -24
-  %_M_length.i.i31 = bitcast i8* %arrayidx.i.i.i30 to i64*
-  %5 = load i64, i64* %_M_length.i.i31, align 8, !tbaa !7
-  %6 = inttoptr i64 %retval.sroa.3.0.copyload.i to void (%"class.std::basic_string"*, %"class.std::error_category"*, i32)***
-  %vtable.i = load void (%"class.std::basic_string"*, %"class.std::error_category"*, i32)**, void (%"class.std::basic_string"*, %"class.std::error_category"*, i32)*** %6, align 8, !tbaa !11
-  %vfn.i = getelementptr inbounds void (%"class.std::basic_string"*, %"class.std::error_category"*, i32)*, void (%"class.std::basic_string"*, %"class.std::error_category"*, i32)** %vtable.i, i64 3
-  %7 = load void (%"class.std::basic_string"*, %"class.std::error_category"*, i32)*, void (%"class.std::basic_string"*, %"class.std::error_category"*, i32)** %vfn.i, align 8
-  call void %7(%"class.std::basic_string"* sret(%"class.std::basic_string") %ref.tmp5, %"class.std::error_category"* %.c, i32 signext %phitmp) #3
-  %call2.i.i = call dereferenceable(8) %"class.std::basic_string"* @_ZNSs6insertEmPKcm(%"class.std::basic_string"* %ref.tmp5, i64 0, i8* getelementptr inbounds ([28 x i8], [28 x i8]* @.str, i64 0, i64 0), i64 27) #3
-  %_M_p2.i.i.i.i = getelementptr inbounds %"class.std::basic_string", %"class.std::basic_string"* %call2.i.i, i64 0, i32 0, i32 0
-  %8 = load i8*, i8** %_M_p2.i.i.i.i, align 8, !tbaa !13
-  store i8* bitcast (i64* getelementptr inbounds ([0 x i64], [0 x i64]* @_ZNSs4_Rep20_S_empty_rep_storageE, i64 0, i64 3) to i8*), i8** %_M_p2.i.i.i.i, align 8, !tbaa !1
-  %arrayidx.i.i.i36 = getelementptr inbounds i8, i8* %8, i64 -24
-  %_M_length.i.i37 = bitcast i8* %arrayidx.i.i.i36 to i64*
-  %9 = load i64, i64* %_M_length.i.i37, align 8, !tbaa !7
-  %Filename.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 2
-  %10 = getelementptr inbounds %"class.std::allocator", %"class.std::allocator"* %ref.tmp.i.i2.i, i64 0, i32 0
-  %11 = bitcast %"class.llvm::SMDiagnostic"* %ref.tmp to i8*
-  call void @llvm.memset.p0i8.i64(i8* align 8 %11, i8 0, i64 16, i1 false) #3
-  call void @llvm.lifetime.start.p0i8(i64 1, i8* %10) #3
-  %tobool.i.i4.i = icmp eq i8* %4, null
+  %.c = inttoptr i64 %retval.sroa.3.0.copyload.i to ptr
+  %4 = load ptr, ptr %_M_p.i.i.i, align 8, !tbaa !1
+  %arrayidx.i.i.i30 = getelementptr inbounds i8, ptr %4, i64 -24
+  %_M_length.i.i31 = bitcast ptr %arrayidx.i.i.i30 to ptr
+  %5 = load i64, ptr %_M_length.i.i31, align 8, !tbaa !7
+  %6 = inttoptr i64 %retval.sroa.3.0.copyload.i to ptr
+  %vtable.i = load ptr, ptr %6, align 8, !tbaa !11
+  %vfn.i = getelementptr inbounds ptr, ptr %vtable.i, i64 3
+  %7 = load ptr, ptr %vfn.i, align 8
+  call void %7(ptr sret(%"class.std::basic_string") %ref.tmp5, ptr %.c, i32 signext %phitmp) #3
+  %call2.i.i = call dereferenceable(8) ptr @_ZNSs6insertEmPKcm(ptr %ref.tmp5, i64 0, ptr @.str, i64 27) #3
+  %_M_p2.i.i.i.i = getelementptr inbounds %"class.std::basic_string", ptr %call2.i.i, i64 0, i32 0, i32 0
+  %8 = load ptr, ptr %_M_p2.i.i.i.i, align 8, !tbaa !13
+  store ptr getelementptr inbounds ([0 x i64], ptr @_ZNSs4_Rep20_S_empty_rep_storageE, i64 0, i64 3), ptr %_M_p2.i.i.i.i, align 8, !tbaa !1
+  %arrayidx.i.i.i36 = getelementptr inbounds i8, ptr %8, i64 -24
+  %_M_length.i.i37 = bitcast ptr %arrayidx.i.i.i36 to ptr
+  %9 = load i64, ptr %_M_length.i.i37, align 8, !tbaa !7
+  %Filename.i = getelementptr inbounds %"class.llvm::SMDiagnostic", ptr %ref.tmp, i64 0, i32 2
+  %10 = getelementptr inbounds %"class.std::allocator", ptr %ref.tmp.i.i2.i, i64 0, i32 0
+  %11 = bitcast ptr %ref.tmp to ptr
+  call void @llvm.memset.p0.i64(ptr align 8 %11, i8 0, i64 16, i1 false) #3
+  call void @llvm.lifetime.start.p0(i64 1, ptr %10) #3
+  %tobool.i.i4.i = icmp eq ptr %4, null
   br i1 %tobool.i.i4.i, label %if.then.i.i6.i, label %if.end.i.i8.i
 
 if.then.i.i6.i:                                   ; preds = %if.then
-  %_M_p.i.i.i.i.i.i5.i = getelementptr inbounds %"class.std::basic_string", %"class.std::basic_string"* %Filename.i, i64 0, i32 0, i32 0
-  store i8* bitcast (i64* getelementptr inbounds ([0 x i64], [0 x i64]* @_ZNSs4_Rep20_S_empty_rep_storageE, i64 0, i64 3) to i8*), i8** %_M_p.i.i.i.i.i.i5.i, align 8, !tbaa !13
+  %_M_p.i.i.i.i.i.i5.i = getelementptr inbounds %"class.std::basic_string", ptr %Filename.i, i64 0, i32 0, i32 0
+  store ptr getelementptr inbounds ([0 x i64], ptr @_ZNSs4_Rep20_S_empty_rep_storageE, i64 0, i64 3), ptr %_M_p.i.i.i.i.i.i5.i, align 8, !tbaa !13
   br label %_ZNK4llvm9StringRefcvSsEv.exit9.i
 
 if.end.i.i8.i:                                    ; preds = %if.then
-  call void @_ZNSsC1EPKcmRKSaIcE(%"class.std::basic_string"* %Filename.i, i8* %4, i64 %5, %"class.std::allocator"* dereferenceable(1) %ref.tmp.i.i2.i) #3
+  call void @_ZNSsC1EPKcmRKSaIcE(ptr %Filename.i, ptr %4, i64 %5, ptr dereferenceable(1) %ref.tmp.i.i2.i) #3
   br label %_ZNK4llvm9StringRefcvSsEv.exit9.i
 
 _ZNK4llvm9StringRefcvSsEv.exit9.i:                ; preds = %if.end.i.i8.i, %if.then.i.i6.i
-  call void @llvm.lifetime.end.p0i8(i64 1, i8* %10) #3
-  %LineNo.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 3
-  store i32 -1, i32* %LineNo.i, align 8, !tbaa !14
-  %ColumnNo.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 4
-  store i32 -1, i32* %ColumnNo.i, align 4, !tbaa !21
-  %Kind.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 5
-  store i32 0, i32* %Kind.i, align 8, !tbaa !22
-  %Message.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 6
-  %12 = getelementptr inbounds %"class.std::allocator", %"class.std::allocator"* %ref.tmp.i.i.i, i64 0, i32 0
-  call void @llvm.lifetime.start.p0i8(i64 1, i8* %12) #3
-  %tobool.i.i.i = icmp eq i8* %8, null
+  call void @llvm.lifetime.end.p0(i64 1, ptr %10) #3
+  %LineNo.i = getelementptr inbounds %"class.llvm::SMDiagnostic", ptr %ref.tmp, i64 0, i32 3
+  store i32 -1, ptr %LineNo.i, align 8, !tbaa !14
+  %ColumnNo.i = getelementptr inbounds %"class.llvm::SMDiagnostic", ptr %ref.tmp, i64 0, i32 4
+  store i32 -1, ptr %ColumnNo.i, align 4, !tbaa !21
+  %Kind.i = getelementptr inbounds %"class.llvm::SMDiagnostic", ptr %ref.tmp, i64 0, i32 5
+  store i32 0, ptr %Kind.i, align 8, !tbaa !22
+  %Message.i = getelementptr inbounds %"class.llvm::SMDiagnostic", ptr %ref.tmp, i64 0, i32 6
+  %12 = getelementptr inbounds %"class.std::allocator", ptr %ref.tmp.i.i.i, i64 0, i32 0
+  call void @llvm.lifetime.start.p0(i64 1, ptr %12) #3
+  %tobool.i.i.i = icmp eq ptr %8, null
   br i1 %tobool.i.i.i, label %if.then.i.i.i, label %if.end.i.i.i
 
 if.then.i.i.i:                                    ; preds = %_ZNK4llvm9StringRefcvSsEv.exit9.i
-  %_M_p.i.i.i.i.i.i.i = getelementptr inbounds %"class.std::basic_string", %"class.std::basic_string"* %Message.i, i64 0, i32 0, i32 0
-  store i8* bitcast (i64* getelementptr inbounds ([0 x i64], [0 x i64]* @_ZNSs4_Rep20_S_empty_rep_storageE, i64 0, i64 3) to i8*), i8** %_M_p.i.i.i.i.i.i.i, align 8, !tbaa !13
+  %_M_p.i.i.i.i.i.i.i = getelementptr inbounds %"class.std::basic_string", ptr %Message.i, i64 0, i32 0, i32 0
+  store ptr getelementptr inbounds ([0 x i64], ptr @_ZNSs4_Rep20_S_empty_rep_storageE, i64 0, i64 3), ptr %_M_p.i.i.i.i.i.i.i, align 8, !tbaa !13
   br label %_ZN4llvm12SMDiagnosticC2ENS_9StringRefENS_9SourceMgr8DiagKindES1_.exit
 
 if.end.i.i.i:                                     ; preds = %_ZNK4llvm9StringRefcvSsEv.exit9.i
-  call void @_ZNSsC1EPKcmRKSaIcE(%"class.std::basic_string"* %Message.i, i8* %8, i64 %9, %"class.std::allocator"* dereferenceable(1) %ref.tmp.i.i.i) #3
+  call void @_ZNSsC1EPKcmRKSaIcE(ptr %Message.i, ptr %8, i64 %9, ptr dereferenceable(1) %ref.tmp.i.i.i) #3
   br label %_ZN4llvm12SMDiagnosticC2ENS_9StringRefENS_9SourceMgr8DiagKindES1_.exit
 
 _ZN4llvm12SMDiagnosticC2ENS_9StringRefENS_9SourceMgr8DiagKindES1_.exit: ; preds = %if.then.i.i.i, %if.end.i.i.i
-  call void @llvm.lifetime.end.p0i8(i64 1, i8* %12) #3
-  %_M_p.i.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 7, i32 0, i32 0
-  store i8* bitcast (i64* getelementptr inbounds ([0 x i64], [0 x i64]* @_ZNSs4_Rep20_S_empty_rep_storageE, i64 0, i64 3) to i8*), i8** %_M_p.i.i.i.i.i, align 8, !tbaa !13
-  %Ranges.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 8
-  %13 = bitcast %"class.std::vector.79"* %Ranges.i to i8*
-  call void @llvm.memset.p0i8.i64(i8* align 8 %13, i8 0, i64 24, i1 false) #3
-  %14 = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 9, i32 0, i32 0, i32 0, i32 1, i32 0, i32 0, i64 0
-  %BeginX.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 9, i32 0, i32 0, i32 0, i32 0, i32 0
-  store i8* %14, i8** %BeginX.i.i.i.i.i.i, align 8, !tbaa !23
-  %EndX.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 9, i32 0, i32 0, i32 0, i32 0, i32 1
-  store i8* %14, i8** %EndX.i.i.i.i.i.i, align 8, !tbaa !25
-  %CapacityX.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 9, i32 0, i32 0, i32 0, i32 0, i32 2
-  %add.ptr.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 9, i32 0, i32 0, i32 0, i32 1, i32 0, i32 0, i64 96
-  store i8* %add.ptr.i.i.i.i.i.i, i8** %CapacityX.i.i.i.i.i.i, align 8, !tbaa !26
-  %15 = bitcast %"class.llvm::SMDiagnostic"* %Err to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %15, i8* align 8 %11, i64 16, i1 false) #3
-  %Filename.i38 = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 2
-  call void @_ZNSs4swapERSs(%"class.std::basic_string"* %Filename.i38, %"class.std::basic_string"* dereferenceable(8) %Filename.i) #3
-  %LineNo.i39 = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 3
-  %16 = bitcast i32* %LineNo.i39 to i8*
-  %17 = bitcast i32* %LineNo.i to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %16, i8* align 4 %17, i64 12, i1 false) #3
-  %Message.i40 = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 6
-  call void @_ZNSs4swapERSs(%"class.std::basic_string"* %Message.i40, %"class.std::basic_string"* dereferenceable(8) %Message.i) #3
-  %LineContents.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 7
-  %LineContents7.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 7
-  call void @_ZNSs4swapERSs(%"class.std::basic_string"* %LineContents.i, %"class.std::basic_string"* dereferenceable(8) %LineContents7.i) #3
-  %Ranges.i41 = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 8
-  %_M_start.i7.i.i.i = getelementptr inbounds %"class.std::vector.79", %"class.std::vector.79"* %Ranges.i41, i64 0, i32 0, i32 0, i32 0
-  %18 = load %"struct.std::pair"*, %"struct.std::pair"** %_M_start.i7.i.i.i, align 8, !tbaa !27
-  %_M_finish.i9.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 8, i32 0, i32 0, i32 1
-  %_M_end_of_storage.i11.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 8, i32 0, i32 0, i32 2
-  %_M_start2.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 8, i32 0, i32 0, i32 0
-  %19 = bitcast %"class.std::vector.79"* %Ranges.i41 to i8*
-  call void @llvm.memset.p0i8.i64(i8* align 8 %19, i8 0, i64 16, i1 false) #3
-  %20 = load %"struct.std::pair"*, %"struct.std::pair"** %_M_start2.i.i.i.i, align 8, !tbaa !27
-  store %"struct.std::pair"* %20, %"struct.std::pair"** %_M_start.i7.i.i.i, align 8, !tbaa !27
-  store %"struct.std::pair"* null, %"struct.std::pair"** %_M_start2.i.i.i.i, align 8, !tbaa !27
-  %_M_finish3.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 8, i32 0, i32 0, i32 1
-  %21 = load %"struct.std::pair"*, %"struct.std::pair"** %_M_finish3.i.i.i.i, align 8, !tbaa !27
-  store %"struct.std::pair"* %21, %"struct.std::pair"** %_M_finish.i9.i.i.i, align 8, !tbaa !27
-  store %"struct.std::pair"* null, %"struct.std::pair"** %_M_finish3.i.i.i.i, align 8, !tbaa !27
-  %_M_end_of_storage4.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 8, i32 0, i32 0, i32 2
-  %22 = load %"struct.std::pair"*, %"struct.std::pair"** %_M_end_of_storage4.i.i.i.i, align 8, !tbaa !27
-  store %"struct.std::pair"* %22, %"struct.std::pair"** %_M_end_of_storage.i11.i.i.i, align 8, !tbaa !27
-  store %"struct.std::pair"* null, %"struct.std::pair"** %_M_end_of_storage4.i.i.i.i, align 8, !tbaa !27
-  %tobool.i.i.i.i.i.i = icmp eq %"struct.std::pair"* %18, null
+  call void @llvm.lifetime.end.p0(i64 1, ptr %12) #3
+  %_M_p.i.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", ptr %ref.tmp, i64 0, i32 7, i32 0, i32 0
+  store ptr getelementptr inbounds ([0 x i64], ptr @_ZNSs4_Rep20_S_empty_rep_storageE, i64 0, i64 3), ptr %_M_p.i.i.i.i.i, align 8, !tbaa !13
+  %Ranges.i = getelementptr inbounds %"class.llvm::SMDiagnostic", ptr %ref.tmp, i64 0, i32 8
+  %13 = bitcast ptr %Ranges.i to ptr
+  call void @llvm.memset.p0.i64(ptr align 8 %13, i8 0, i64 24, i1 false) #3
+  %14 = getelementptr inbounds %"class.llvm::SMDiagnostic", ptr %ref.tmp, i64 0, i32 9, i32 0, i32 0, i32 0, i32 1, i32 0, i32 0, i64 0
+  %BeginX.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", ptr %ref.tmp, i64 0, i32 9, i32 0, i32 0, i32 0, i32 0, i32 0
+  store ptr %14, ptr %BeginX.i.i.i.i.i.i, align 8, !tbaa !23
+  %EndX.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", ptr %ref.tmp, i64 0, i32 9, i32 0, i32 0, i32 0, i32 0, i32 1
+  store ptr %14, ptr %EndX.i.i.i.i.i.i, align 8, !tbaa !25
+  %CapacityX.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", ptr %ref.tmp, i64 0, i32 9, i32 0, i32 0, i32 0, i32 0, i32 2
+  %add.ptr.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", ptr %ref.tmp, i64 0, i32 9, i32 0, i32 0, i32 0, i32 1, i32 0, i32 0, i64 96
+  store ptr %add.ptr.i.i.i.i.i.i, ptr %CapacityX.i.i.i.i.i.i, align 8, !tbaa !26
+  %15 = bitcast ptr %Err to ptr
+  call void @llvm.memcpy.p0.p0.i64(ptr align 8 %15, ptr align 8 %11, i64 16, i1 false) #3
+  %Filename.i38 = getelementptr inbounds %"class.llvm::SMDiagnostic", ptr %Err, i64 0, i32 2
+  call void @_ZNSs4swapERSs(ptr %Filename.i38, ptr dereferenceable(8) %Filename.i) #3
+  %LineNo.i39 = getelementptr inbounds %"class.llvm::SMDiagnostic", ptr %Err, i64 0, i32 3
+  %16 = bitcast ptr %LineNo.i39 to ptr
+  %17 = bitcast ptr %LineNo.i to ptr
+  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %16, ptr align 4 %17, i64 12, i1 false) #3
+  %Message.i40 = getelementptr inbounds %"class.llvm::SMDiagnostic", ptr %Err, i64 0, i32 6
+  call void @_ZNSs4swapERSs(ptr %Message.i40, ptr dereferenceable(8) %Message.i) #3
+  %LineContents.i = getelementptr inbounds %"class.llvm::SMDiagnostic", ptr %Err, i64 0, i32 7
+  %LineContents7.i = getelementptr inbounds %"class.llvm::SMDiagnostic", ptr %ref.tmp, i64 0, i32 7
+  call void @_ZNSs4swapERSs(ptr %LineContents.i, ptr dereferenceable(8) %LineContents7.i) #3
+  %Ranges.i41 = getelementptr inbounds %"class.llvm::SMDiagnostic", ptr %Err, i64 0, i32 8
+  %_M_start.i7.i.i.i = getelementptr inbounds %"class.std::vector.79", ptr %Ranges.i41, i64 0, i32 0, i32 0, i32 0
+  %18 = load ptr, ptr %_M_start.i7.i.i.i, align 8, !tbaa !27
+  %_M_finish.i9.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", ptr %Err, i64 0, i32 8, i32 0, i32 0, i32 1
+  %_M_end_of_storage.i11.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", ptr %Err, i64 0, i32 8, i32 0, i32 0, i32 2
+  %_M_start2.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", ptr %ref.tmp, i64 0, i32 8, i32 0, i32 0, i32 0
+  %19 = bitcast ptr %Ranges.i41 to ptr
+  call void @llvm.memset.p0.i64(ptr align 8 %19, i8 0, i64 16, i1 false) #3
+  %20 = load ptr, ptr %_M_start2.i.i.i.i, align 8, !tbaa !27
+  store ptr %20, ptr %_M_start.i7.i.i.i, align 8, !tbaa !27
+  store ptr null, ptr %_M_start2.i.i.i.i, align 8, !tbaa !27
+  %_M_finish3.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", ptr %ref.tmp, i64 0, i32 8, i32 0, i32 0, i32 1
+  %21 = load ptr, ptr %_M_finish3.i.i.i.i, align 8, !tbaa !27
+  store ptr %21, ptr %_M_finish.i9.i.i.i, align 8, !tbaa !27
+  store ptr null, ptr %_M_finish3.i.i.i.i, align 8, !tbaa !27
+  %_M_end_of_storage4.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", ptr %ref.tmp, i64 0, i32 8, i32 0, i32 0, i32 2
+  %22 = load ptr, ptr %_M_end_of_storage4.i.i.i.i, align 8, !tbaa !27
+  store ptr %22, ptr %_M_end_of_storage.i11.i.i.i, align 8, !tbaa !27
+  store ptr null, ptr %_M_end_of_storage4.i.i.i.i, align 8, !tbaa !27
+  %tobool.i.i.i.i.i.i = icmp eq ptr %18, null
   br i1 %tobool.i.i.i.i.i.i, label %_ZN4llvm12SMDiagnosticaSEOS0_.exit, label %if.then.i.i.i.i.i.i
 
 if.then.i.i.i.i.i.i:                              ; preds = %_ZN4llvm12SMDiagnosticC2ENS_9StringRefENS_9SourceMgr8DiagKindES1_.exit
-  %23 = bitcast %"struct.std::pair"* %18 to i8*
-  call void @_ZdlPv(i8* %23) #3
+  %23 = bitcast ptr %18 to ptr
+  call void @_ZdlPv(ptr %23) #3
   br label %_ZN4llvm12SMDiagnosticaSEOS0_.exit
 
 _ZN4llvm12SMDiagnosticaSEOS0_.exit:               ; preds = %_ZN4llvm12SMDiagnosticC2ENS_9StringRefENS_9SourceMgr8DiagKindES1_.exit, %if.then.i.i.i.i.i.i
-  %24 = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 9, i32 0
-  %25 = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 9, i32 0
-  %call2.i.i42 = call dereferenceable(48) %"class.llvm::SmallVectorImpl.85"* @_ZN4llvm15SmallVectorImplINS_7SMFixItEEaSEOS2_(%"class.llvm::SmallVectorImpl.85"* %24, %"class.llvm::SmallVectorImpl.85"* dereferenceable(48) %25) #3
-  call void @_ZN4llvm12SMDiagnosticD2Ev(%"class.llvm::SMDiagnostic"* %ref.tmp) #3
-  %26 = getelementptr inbounds %"class.std::allocator", %"class.std::allocator"* %ref.tmp.i.i, i64 0, i32 0
-  call void @llvm.lifetime.start.p0i8(i64 1, i8* %26) #3
-  %27 = bitcast i8* %arrayidx.i.i.i36 to %"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Rep"*
-  %cmp.i.i.i = icmp eq i8* %arrayidx.i.i.i36, bitcast ([0 x i64]* @_ZNSs4_Rep20_S_empty_rep_storageE to i8*)
+  %24 = getelementptr inbounds %"class.llvm::SMDiagnostic", ptr %Err, i64 0, i32 9, i32 0
+  %25 = getelementptr inbounds %"class.llvm::SMDiagnostic", ptr %ref.tmp, i64 0, i32 9, i32 0
+  %call2.i.i42 = call dereferenceable(48) ptr @_ZN4llvm15SmallVectorImplINS_7SMFixItEEaSEOS2_(ptr %24, ptr dereferenceable(48) %25) #3
+  call void @_ZN4llvm12SMDiagnosticD2Ev(ptr %ref.tmp) #3
+  %26 = getelementptr inbounds %"class.std::allocator", ptr %ref.tmp.i.i, i64 0, i32 0
+  call void @llvm.lifetime.start.p0(i64 1, ptr %26) #3
+  %27 = bitcast ptr %arrayidx.i.i.i36 to ptr
+  %cmp.i.i.i = icmp eq ptr %arrayidx.i.i.i36, @_ZNSs4_Rep20_S_empty_rep_storageE
   br i1 %cmp.i.i.i, label %_ZNSsD1Ev.exit, label %if.then.i.i.i45, !prof !28
 
 if.then.i.i.i45:                                  ; preds = %_ZN4llvm12SMDiagnosticaSEOS0_.exit
-  %_M_refcount.i.i.i = getelementptr inbounds i8, i8* %8, i64 -8
-  %28 = bitcast i8* %_M_refcount.i.i.i to i32*
-  br i1 icmp ne (i8* bitcast (i32 (i32*, void (i8*)*)* @__pthread_key_create to i8*), i8* null), label %if.then.i.i.i.i, label %if.else.i.i.i.i
+  %_M_refcount.i.i.i = getelementptr inbounds i8, ptr %8, i64 -8
+  %28 = bitcast ptr %_M_refcount.i.i.i to ptr
+  br i1 icmp ne (ptr @__pthread_key_create, ptr null), label %if.then.i.i.i.i, label %if.else.i.i.i.i
 
 if.then.i.i.i.i:                                  ; preds = %if.then.i.i.i45
-  %.atomicdst.i.i.i.i.i.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..sroa_cast = bitcast i32* %.atomicdst.i.i.i.i.i to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* %.atomicdst.i.i.i.i.i.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..sroa_cast)
-  %29 = atomicrmw volatile add i32* %28, i32 -1 acq_rel
-  store i32 %29, i32* %.atomicdst.i.i.i.i.i, align 4
-  %.atomicdst.i.i.i.i.i.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..atomicdst.0..atomicdst.0..i.i.i.i.i = load volatile i32, i32* %.atomicdst.i.i.i.i.i, align 4
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* %.atomicdst.i.i.i.i.i.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..sroa_cast)
+  %.atomicdst.i.i.i.i.i.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..sroa_cast = bitcast ptr %.atomicdst.i.i.i.i.i to ptr
+  call void @llvm.lifetime.start.p0(i64 4, ptr %.atomicdst.i.i.i.i.i.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..sroa_cast)
+  %29 = atomicrmw volatile add ptr %28, i32 -1 acq_rel
+  store i32 %29, ptr %.atomicdst.i.i.i.i.i, align 4
+  %.atomicdst.i.i.i.i.i.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..atomicdst.0..atomicdst.0..i.i.i.i.i = load volatile i32, ptr %.atomicdst.i.i.i.i.i, align 4
+  call void @llvm.lifetime.end.p0(i64 4, ptr %.atomicdst.i.i.i.i.i.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..sroa_cast)
   br label %_ZN9__gnu_cxxL27__exchange_and_add_dispatchEPii.exit.i.i.i
 
 if.else.i.i.i.i:                                  ; preds = %if.then.i.i.i45
-  %30 = load i32, i32* %28, align 4, !tbaa !29
+  %30 = load i32, ptr %28, align 4, !tbaa !29
   %add.i.i.i.i.i = add nsw i32 %30, -1
-  store i32 %add.i.i.i.i.i, i32* %28, align 4, !tbaa !29
+  store i32 %add.i.i.i.i.i, ptr %28, align 4, !tbaa !29
   br label %_ZN9__gnu_cxxL27__exchange_and_add_dispatchEPii.exit.i.i.i
 
 _ZN9__gnu_cxxL27__exchange_and_add_dispatchEPii.exit.i.i.i: ; preds = %if.else.i.i.i.i, %if.then.i.i.i.i
@@ -351,38 +351,38 @@ _ZN9__gnu_cxxL27__exchange_and_add_dispatchEPii.exit.i.i.i: ; preds = %if.else.i
   br i1 %cmp3.i.i.i, label %if.then4.i.i.i, label %_ZNSsD1Ev.exit
 
 if.then4.i.i.i:                                   ; preds = %_ZN9__gnu_cxxL27__exchange_and_add_dispatchEPii.exit.i.i.i
-  call void @_ZNSs4_Rep10_M_destroyERKSaIcE(%"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Rep"* %27, %"class.std::allocator"* dereferenceable(1) %ref.tmp.i.i) #3
+  call void @_ZNSs4_Rep10_M_destroyERKSaIcE(ptr %27, ptr dereferenceable(1) %ref.tmp.i.i) #3
   br label %_ZNSsD1Ev.exit
 
 _ZNSsD1Ev.exit:                                   ; preds = %_ZN4llvm12SMDiagnosticaSEOS0_.exit, %_ZN9__gnu_cxxL27__exchange_and_add_dispatchEPii.exit.i.i.i, %if.then4.i.i.i
-  call void @llvm.lifetime.end.p0i8(i64 1, i8* %26) #3
-  %31 = getelementptr inbounds %"class.std::allocator", %"class.std::allocator"* %ref.tmp.i.i47, i64 0, i32 0
-  call void @llvm.lifetime.start.p0i8(i64 1, i8* %31) #3
-  %_M_p.i.i.i.i48 = getelementptr inbounds %"class.std::basic_string", %"class.std::basic_string"* %ref.tmp5, i64 0, i32 0, i32 0
-  %32 = load i8*, i8** %_M_p.i.i.i.i48, align 8, !tbaa !1
-  %arrayidx.i.i.i49 = getelementptr inbounds i8, i8* %32, i64 -24
-  %33 = bitcast i8* %arrayidx.i.i.i49 to %"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Rep"*
-  %cmp.i.i.i50 = icmp eq i8* %arrayidx.i.i.i49, bitcast ([0 x i64]* @_ZNSs4_Rep20_S_empty_rep_storageE to i8*)
+  call void @llvm.lifetime.end.p0(i64 1, ptr %26) #3
+  %31 = getelementptr inbounds %"class.std::allocator", ptr %ref.tmp.i.i47, i64 0, i32 0
+  call void @llvm.lifetime.start.p0(i64 1, ptr %31) #3
+  %_M_p.i.i.i.i48 = getelementptr inbounds %"class.std::basic_string", ptr %ref.tmp5, i64 0, i32 0, i32 0
+  %32 = load ptr, ptr %_M_p.i.i.i.i48, align 8, !tbaa !1
+  %arrayidx.i.i.i49 = getelementptr inbounds i8, ptr %32, i64 -24
+  %33 = bitcast ptr %arrayidx.i.i.i49 to ptr
+  %cmp.i.i.i50 = icmp eq ptr %arrayidx.i.i.i49, @_ZNSs4_Rep20_S_empty_rep_storageE
   br i1 %cmp.i.i.i50, label %_ZNSsD1Ev.exit62, label %if.then.i.i.i52, !prof !28
 
 if.then.i.i.i52:                                  ; preds = %_ZNSsD1Ev.exit
-  %_M_refcount.i.i.i51 = getelementptr inbounds i8, i8* %32, i64 -8
-  %34 = bitcast i8* %_M_refcount.i.i.i51 to i32*
-  br i1 icmp ne (i8* bitcast (i32 (i32*, void (i8*)*)* @__pthread_key_create to i8*), i8* null), label %if.then.i.i.i.i55, label %if.else.i.i.i.i57
+  %_M_refcount.i.i.i51 = getelementptr inbounds i8, ptr %32, i64 -8
+  %34 = bitcast ptr %_M_refcount.i.i.i51 to ptr
+  br i1 icmp ne (ptr @__pthread_key_create, ptr null), label %if.then.i.i.i.i55, label %if.else.i.i.i.i57
 
 if.then.i.i.i.i55:                                ; preds = %if.then.i.i.i52
-  %.atomicdst.i.i.i.i.i46.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..sroa_cast = bitcast i32* %.atomicdst.i.i.i.i.i46 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* %.atomicdst.i.i.i.i.i46.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..sroa_cast)
-  %35 = atomicrmw volatile add i32* %34, i32 -1 acq_rel
-  store i32 %35, i32* %.atomicdst.i.i.i.i.i46, align 4
-  %.atomicdst.i.i.i.i.i46.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..atomicdst.0..atomicdst.0..i.i.i.i.i54 = load volatile i32, i32* %.atomicdst.i.i.i.i.i46, align 4
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* %.atomicdst.i.i.i.i.i46.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..sroa_cast)
+  %.atomicdst.i.i.i.i.i46.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..sroa_cast = bitcast ptr %.atomicdst.i.i.i.i.i46 to ptr
+  call void @llvm.lifetime.start.p0(i64 4, ptr %.atomicdst.i.i.i.i.i46.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..sroa_cast)
+  %35 = atomicrmw volatile add ptr %34, i32 -1 acq_rel
+  store i32 %35, ptr %.atomicdst.i.i.i.i.i46, align 4
+  %.atomicdst.i.i.i.i.i46.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..atomicdst.0..atomicdst.0..i.i.i.i.i54 = load volatile i32, ptr %.atomicdst.i.i.i.i.i46, align 4
+  call void @llvm.lifetime.end.p0(i64 4, ptr %.atomicdst.i.i.i.i.i46.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..sroa_cast)
   br label %_ZN9__gnu_cxxL27__exchange_and_add_dispatchEPii.exit.i.i.i60
 
 if.else.i.i.i.i57:                                ; preds = %if.then.i.i.i52
-  %36 = load i32, i32* %34, align 4, !tbaa !29
+  %36 = load i32, ptr %34, align 4, !tbaa !29
   %add.i.i.i.i.i56 = add nsw i32 %36, -1
-  store i32 %add.i.i.i.i.i56, i32* %34, align 4, !tbaa !29
+  store i32 %add.i.i.i.i.i56, ptr %34, align 4, !tbaa !29
   br label %_ZN9__gnu_cxxL27__exchange_and_add_dispatchEPii.exit.i.i.i60
 
 _ZN9__gnu_cxxL27__exchange_and_add_dispatchEPii.exit.i.i.i60: ; preds = %if.else.i.i.i.i57, %if.then.i.i.i.i55
@@ -391,87 +391,87 @@ _ZN9__gnu_cxxL27__exchange_and_add_dispatchEPii.exit.i.i.i60: ; preds = %if.else
   br i1 %cmp3.i.i.i59, label %if.then4.i.i.i61, label %_ZNSsD1Ev.exit62
 
 if.then4.i.i.i61:                                 ; preds = %_ZN9__gnu_cxxL27__exchange_and_add_dispatchEPii.exit.i.i.i60
-  call void @_ZNSs4_Rep10_M_destroyERKSaIcE(%"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Rep"* %33, %"class.std::allocator"* dereferenceable(1) %ref.tmp.i.i47) #3
+  call void @_ZNSs4_Rep10_M_destroyERKSaIcE(ptr %33, ptr dereferenceable(1) %ref.tmp.i.i47) #3
   br label %_ZNSsD1Ev.exit62
 
 _ZNSsD1Ev.exit62:                                 ; preds = %_ZNSsD1Ev.exit, %_ZN9__gnu_cxxL27__exchange_and_add_dispatchEPii.exit.i.i.i60, %if.then4.i.i.i61
-  call void @llvm.lifetime.end.p0i8(i64 1, i8* %31) #3
+  call void @llvm.lifetime.end.p0(i64 1, ptr %31) #3
   br label %cleanup
 
 cond.false.i.i:                                   ; preds = %_ZNK4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE8getErrorEv.exit
-  call void @__assert_fail(i8* getelementptr inbounds ([54 x i8], [54 x i8]* @.str1, i64 0, i64 0), i8* getelementptr inbounds ([61 x i8], [61 x i8]* @.str2, i64 0, i64 0), i32 zeroext 242, i8* getelementptr inbounds ([206 x i8], [206 x i8]* @__PRETTY_FUNCTION__._ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE10getStorageEv, i64 0, i64 0)) #7
+  call void @__assert_fail(ptr @.str1, ptr @.str2, i32 zeroext 242, ptr @__PRETTY_FUNCTION__._ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE10getStorageEv) #7
   unreachable
 
 _ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE3getEv.exit: ; preds = %entry
-  %_M_head_impl.i.i.i.i.i = bitcast %"class.llvm::ErrorOr"* %FileOrErr to %"class.llvm::MemoryBuffer"**
-  %37 = load %"class.llvm::MemoryBuffer"*, %"class.llvm::MemoryBuffer"** %_M_head_impl.i.i.i.i.i, align 8, !tbaa !27
-  %call9 = call %"class.llvm::Module"* @_ZN4llvm7ParseIREPNS_12MemoryBufferERNS_12SMDiagnosticERNS_11LLVMContextE(%"class.llvm::MemoryBuffer"* %37, %"class.llvm::SMDiagnostic"* dereferenceable(200) %Err, %"class.llvm::LLVMContext"* dereferenceable(8) %Context)
+  %_M_head_impl.i.i.i.i.i = bitcast ptr %FileOrErr to ptr
+  %37 = load ptr, ptr %_M_head_impl.i.i.i.i.i, align 8, !tbaa !27
+  %call9 = call ptr @_ZN4llvm7ParseIREPNS_12MemoryBufferERNS_12SMDiagnosticERNS_11LLVMContextE(ptr %37, ptr dereferenceable(200) %Err, ptr dereferenceable(8) %Context)
   br label %cleanup
 
 cleanup:                                          ; preds = %_ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE3getEv.exit, %_ZNSsD1Ev.exit62
-  %retval.0 = phi %"class.llvm::Module"* [ null, %_ZNSsD1Ev.exit62 ], [ %call9, %_ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE3getEv.exit ]
-  %bf.load.i = load i8, i8* %HasError.i24, align 8
+  %retval.0 = phi ptr [ null, %_ZNSsD1Ev.exit62 ], [ %call9, %_ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE3getEv.exit ]
+  %bf.load.i = load i8, ptr %HasError.i24, align 8
   %38 = and i8 %bf.load.i, 1
   %bf.cast.i = icmp eq i8 %38, 0
   br i1 %bf.cast.i, label %_ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE10getStorageEv.exit.i, label %_ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEED2Ev.exit
 
 _ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE10getStorageEv.exit.i: ; preds = %cleanup
-  %_M_head_impl.i.i.i.i.i.i = bitcast %"class.llvm::ErrorOr"* %FileOrErr to %"class.llvm::MemoryBuffer"**
-  %39 = load %"class.llvm::MemoryBuffer"*, %"class.llvm::MemoryBuffer"** %_M_head_impl.i.i.i.i.i.i, align 8, !tbaa !27
-  %cmp.i.i = icmp eq %"class.llvm::MemoryBuffer"* %39, null
+  %_M_head_impl.i.i.i.i.i.i = bitcast ptr %FileOrErr to ptr
+  %39 = load ptr, ptr %_M_head_impl.i.i.i.i.i.i, align 8, !tbaa !27
+  %cmp.i.i = icmp eq ptr %39, null
   br i1 %cmp.i.i, label %_ZNSt10unique_ptrIN4llvm12MemoryBufferESt14default_deleteIS1_EED2Ev.exit.i, label %_ZNKSt14default_deleteIN4llvm12MemoryBufferEEclEPS1_.exit.i.i
 
 _ZNKSt14default_deleteIN4llvm12MemoryBufferEEclEPS1_.exit.i.i: ; preds = %_ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE10getStorageEv.exit.i
-  %40 = bitcast %"class.llvm::MemoryBuffer"* %39 to void (%"class.llvm::MemoryBuffer"*)***
-  %vtable.i.i.i = load void (%"class.llvm::MemoryBuffer"*)**, void (%"class.llvm::MemoryBuffer"*)*** %40, align 8, !tbaa !11
-  %vfn.i.i.i = getelementptr inbounds void (%"class.llvm::MemoryBuffer"*)*, void (%"class.llvm::MemoryBuffer"*)** %vtable.i.i.i, i64 1
-  %41 = load void (%"class.llvm::MemoryBuffer"*)*, void (%"class.llvm::MemoryBuffer"*)** %vfn.i.i.i, align 8
-  call void %41(%"class.llvm::MemoryBuffer"* %39) #3
+  %40 = bitcast ptr %39 to ptr
+  %vtable.i.i.i = load ptr, ptr %40, align 8, !tbaa !11
+  %vfn.i.i.i = getelementptr inbounds ptr, ptr %vtable.i.i.i, i64 1
+  %41 = load ptr, ptr %vfn.i.i.i, align 8
+  call void %41(ptr %39) #3
   br label %_ZNSt10unique_ptrIN4llvm12MemoryBufferESt14default_deleteIS1_EED2Ev.exit.i
 
 _ZNSt10unique_ptrIN4llvm12MemoryBufferESt14default_deleteIS1_EED2Ev.exit.i: ; preds = %_ZNKSt14default_deleteIN4llvm12MemoryBufferEEclEPS1_.exit.i.i, %_ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE10getStorageEv.exit.i
-  store %"class.llvm::MemoryBuffer"* null, %"class.llvm::MemoryBuffer"** %_M_head_impl.i.i.i.i.i.i, align 8, !tbaa !27
+  store ptr null, ptr %_M_head_impl.i.i.i.i.i.i, align 8, !tbaa !27
   br label %_ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEED2Ev.exit
 
 _ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEED2Ev.exit: ; preds = %cleanup, %_ZNSt10unique_ptrIN4llvm12MemoryBufferESt14default_deleteIS1_EED2Ev.exit.i
-  ret %"class.llvm::Module"* %retval.0
+  ret ptr %retval.0
 }
 
 ; Function Attrs: nounwind
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #3
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #3
 
 ; Function Attrs: nounwind
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #3
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #3
 
 ; Function Attrs: noreturn nounwind
-declare void @__assert_fail(i8*, i8*, i32 zeroext, i8*) #4
+declare void @__assert_fail(ptr, ptr, i32 zeroext, ptr) #4
 
-declare dereferenceable(8) %"class.std::basic_string"* @_ZNSs6insertEmPKcm(%"class.std::basic_string"*, i64, i8*, i64) #1
+declare dereferenceable(8) ptr @_ZNSs6insertEmPKcm(ptr, i64, ptr, i64) #1
 
 ; Function Attrs: nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #3
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1) #3
 
 ; Function Attrs: nounwind
-declare void @_ZNSs4_Rep10_M_destroyERKSaIcE(%"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Rep"*, %"class.std::allocator"* dereferenceable(1)) #0
+declare void @_ZNSs4_Rep10_M_destroyERKSaIcE(ptr, ptr dereferenceable(1)) #0
 
 ; Function Attrs: nounwind
-declare extern_weak signext i32 @__pthread_key_create(i32*, void (i8*)*) #0
+declare extern_weak signext i32 @__pthread_key_create(ptr, ptr) #0
 
 ; Function Attrs: nobuiltin nounwind
-declare void @_ZdlPv(i8*) #6
+declare void @_ZdlPv(ptr) #6
 
-declare void @_ZNSsC1EPKcmRKSaIcE(%"class.std::basic_string"*, i8*, i64, %"class.std::allocator"* dereferenceable(1)) #1
+declare void @_ZNSsC1EPKcmRKSaIcE(ptr, ptr, i64, ptr dereferenceable(1)) #1
 
-declare hidden void @_ZN4llvm12SMDiagnosticD2Ev(%"class.llvm::SMDiagnostic"* readonly %this) unnamed_addr #2 align 2
+declare hidden void @_ZN4llvm12SMDiagnosticD2Ev(ptr readonly %this) unnamed_addr #2 align 2
 
-declare dereferenceable(48) %"class.llvm::SmallVectorImpl.85"* @_ZN4llvm15SmallVectorImplINS_7SMFixItEEaSEOS2_(%"class.llvm::SmallVectorImpl.85"* %this, %"class.llvm::SmallVectorImpl.85"* dereferenceable(48) %RHS) #0 align 2
+declare dereferenceable(48) ptr @_ZN4llvm15SmallVectorImplINS_7SMFixItEEaSEOS2_(ptr %this, ptr dereferenceable(48) %RHS) #0 align 2
 
-declare %"class.llvm::Module"* @_ZN4llvm7ParseIREPNS_12MemoryBufferERNS_12SMDiagnosticERNS_11LLVMContextE(%"class.llvm::MemoryBuffer"* %Buffer, %"class.llvm::SMDiagnostic"* dereferenceable(200) %Err, %"class.llvm::LLVMContext"* dereferenceable(8) %Context) #0
+declare ptr @_ZN4llvm7ParseIREPNS_12MemoryBufferERNS_12SMDiagnosticERNS_11LLVMContextE(ptr %Buffer, ptr dereferenceable(200) %Err, ptr dereferenceable(8) %Context) #0
 
-declare void @_ZNSs4swapERSs(%"class.std::basic_string"*, %"class.std::basic_string"* dereferenceable(8)) #1
+declare void @_ZNSs4swapERSs(ptr, ptr dereferenceable(8)) #1
 
 ; Function Attrs: nounwind
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) #3
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) #3
 
 attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
 attributes #1 = { "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }

diff --git a/llvm/test/CodeGen/PowerPC/vsx-infl-copy1.ll b/llvm/test/CodeGen/PowerPC/vsx-infl-copy1.ll
index 40334109baaf7..02aeeb2f90ce8 100644
--- a/llvm/test/CodeGen/PowerPC/vsx-infl-copy1.ll
+++ b/llvm/test/CodeGen/PowerPC/vsx-infl-copy1.ll
@@ -6,7 +6,7 @@ target triple = "powerpc64-unknown-linux-gnu"
 @uc = external global [1024 x i32], align 4
 
 ; Function Attrs: noinline nounwind
-define <4 x i32> @_Z8example9Pj(<4 x i32>* %addr1, i64 %input1, i64 %input2) #0 {
+define <4 x i32> @_Z8example9Pj(ptr %addr1, i64 %input1, i64 %input2) #0 {
 entry:
   br label %vector.body
 
@@ -30,69 +30,69 @@ vector.body:                                      ; preds = %vector.body, %entry
   %vec.phi28 = phi <4 x i32> [ zeroinitializer, %entry ], [ %51, %vector.body ]
   %vec.phi29 = phi <4 x i32> [ zeroinitializer, %entry ], [ %52, %vector.body ]
   %vec.phi30 = phi <4 x i32> [ zeroinitializer, %entry ], [ %53, %vector.body ]
-  %wide.load32 = load <4 x i32>, <4 x i32>* null, align 4
+  %wide.load32 = load <4 x i32>, ptr null, align 4
   %.sum82 = add i64 %index, 24
-  %0 = getelementptr [1024 x i32], [1024 x i32]* @ub, i64 0, i64 %.sum82
-  %1 = bitcast i32* %0 to <4 x i32>*
-  %wide.load36 = load <4 x i32>, <4 x i32>* %1, align 4
-  %wide.load37 = load <4 x i32>, <4 x i32>* %addr1, align 4
+  %0 = getelementptr [1024 x i32], ptr @ub, i64 0, i64 %.sum82
+  %1 = bitcast ptr %0 to ptr
+  %wide.load36 = load <4 x i32>, ptr %1, align 4
+  %wide.load37 = load <4 x i32>, ptr %addr1, align 4
   %.sum84 = add i64 %index, 32
-  %2 = getelementptr [1024 x i32], [1024 x i32]* @ub, i64 0, i64 %.sum84
-  %3 = bitcast i32* %2 to <4 x i32>*
-  %wide.load38 = load <4 x i32>, <4 x i32>* %3, align 4
+  %2 = getelementptr [1024 x i32], ptr @ub, i64 0, i64 %.sum84
+  %3 = bitcast ptr %2 to ptr
+  %wide.load38 = load <4 x i32>, ptr %3, align 4
   %.sum85 = add i64 %index, 36
-  %4 = getelementptr [1024 x i32], [1024 x i32]* @ub, i64 0, i64 %.sum85
-  %5 = bitcast i32* %4 to <4 x i32>*
-  %wide.load39 = load <4 x i32>, <4 x i32>* %5, align 4
-  %6 = getelementptr [1024 x i32], [1024 x i32]* @ub, i64 0, i64 %input1
-  %7 = bitcast i32* %6 to <4 x i32>*
-  %wide.load40 = load <4 x i32>, <4 x i32>* %7, align 4
+  %4 = getelementptr [1024 x i32], ptr @ub, i64 0, i64 %.sum85
+  %5 = bitcast ptr %4 to ptr
+  %wide.load39 = load <4 x i32>, ptr %5, align 4
+  %6 = getelementptr [1024 x i32], ptr @ub, i64 0, i64 %input1
+  %7 = bitcast ptr %6 to ptr
+  %wide.load40 = load <4 x i32>, ptr %7, align 4
   %.sum87 = add i64 %index, 44
-  %8 = getelementptr [1024 x i32], [1024 x i32]* @ub, i64 0, i64 %.sum87
-  %9 = bitcast i32* %8 to <4 x i32>*
-  %wide.load41 = load <4 x i32>, <4 x i32>* %9, align 4
-  %10 = getelementptr inbounds [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %index
-  %11 = bitcast i32* %10 to <4 x i32>*
-  %wide.load42 = load <4 x i32>, <4 x i32>* %11, align 4
+  %8 = getelementptr [1024 x i32], ptr @ub, i64 0, i64 %.sum87
+  %9 = bitcast ptr %8 to ptr
+  %wide.load41 = load <4 x i32>, ptr %9, align 4
+  %10 = getelementptr inbounds [1024 x i32], ptr @uc, i64 0, i64 %index
+  %11 = bitcast ptr %10 to ptr
+  %wide.load42 = load <4 x i32>, ptr %11, align 4
   %.sum8889 = or i64 %index, 4
-  %12 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %.sum8889
-  %13 = bitcast i32* %12 to <4 x i32>*
-  %wide.load43 = load <4 x i32>, <4 x i32>* %13, align 4
+  %12 = getelementptr [1024 x i32], ptr @uc, i64 0, i64 %.sum8889
+  %13 = bitcast ptr %12 to ptr
+  %wide.load43 = load <4 x i32>, ptr %13, align 4
   %.sum9091 = or i64 %index, 8
-  %14 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %.sum9091
-  %15 = bitcast i32* %14 to <4 x i32>*
-  %wide.load44 = load <4 x i32>, <4 x i32>* %15, align 4
+  %14 = getelementptr [1024 x i32], ptr @uc, i64 0, i64 %.sum9091
+  %15 = bitcast ptr %14 to ptr
+  %wide.load44 = load <4 x i32>, ptr %15, align 4
   %.sum94 = add i64 %index, 16
-  %16 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %.sum94
-  %17 = bitcast i32* %16 to <4 x i32>*
-  %wide.load46 = load <4 x i32>, <4 x i32>* %17, align 4
+  %16 = getelementptr [1024 x i32], ptr @uc, i64 0, i64 %.sum94
+  %17 = bitcast ptr %16 to ptr
+  %wide.load46 = load <4 x i32>, ptr %17, align 4
   %.sum95 = add i64 %index, 20
-  %18 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %.sum95
-  %19 = bitcast i32* %18 to <4 x i32>*
-  %wide.load47 = load <4 x i32>, <4 x i32>* %19, align 4
-  %20 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %input2
-  %21 = bitcast i32* %20 to <4 x i32>*
-  %wide.load48 = load <4 x i32>, <4 x i32>* %21, align 4
+  %18 = getelementptr [1024 x i32], ptr @uc, i64 0, i64 %.sum95
+  %19 = bitcast ptr %18 to ptr
+  %wide.load47 = load <4 x i32>, ptr %19, align 4
+  %20 = getelementptr [1024 x i32], ptr @uc, i64 0, i64 %input2
+  %21 = bitcast ptr %20 to ptr
+  %wide.load48 = load <4 x i32>, ptr %21, align 4
   %.sum97 = add i64 %index, 28
-  %22 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %.sum97
-  %23 = bitcast i32* %22 to <4 x i32>*
-  %wide.load49 = load <4 x i32>, <4 x i32>* %23, align 4
+  %22 = getelementptr [1024 x i32], ptr @uc, i64 0, i64 %.sum97
+  %23 = bitcast ptr %22 to ptr
+  %wide.load49 = load <4 x i32>, ptr %23, align 4
   %.sum98 = add i64 %index, 32
-  %24 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %.sum98
-  %25 = bitcast i32* %24 to <4 x i32>*
-  %wide.load50 = load <4 x i32>, <4 x i32>* %25, align 4
+  %24 = getelementptr [1024 x i32], ptr @uc, i64 0, i64 %.sum98
+  %25 = bitcast ptr %24 to ptr
+  %wide.load50 = load <4 x i32>, ptr %25, align 4
   %.sum99 = add i64 %index, 36
-  %26 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %.sum99
-  %27 = bitcast i32* %26 to <4 x i32>*
-  %wide.load51 = load <4 x i32>, <4 x i32>* %27, align 4
+  %26 = getelementptr [1024 x i32], ptr @uc, i64 0, i64 %.sum99
+  %27 = bitcast ptr %26 to ptr
+  %wide.load51 = load <4 x i32>, ptr %27, align 4
   %.sum100 = add i64 %index, 40
-  %28 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %.sum100
-  %29 = bitcast i32* %28 to <4 x i32>*
-  %wide.load52 = load <4 x i32>, <4 x i32>* %29, align 4
+  %28 = getelementptr [1024 x i32], ptr @uc, i64 0, i64 %.sum100
+  %29 = bitcast ptr %28 to ptr
+  %wide.load52 = load <4 x i32>, ptr %29, align 4
   %.sum101 = add i64 %index, 44
-  %30 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %.sum101
-  %31 = bitcast i32* %30 to <4 x i32>*
-  %wide.load53 = load <4 x i32>, <4 x i32>* %31, align 4
+  %30 = getelementptr [1024 x i32], ptr @uc, i64 0, i64 %.sum101
+  %31 = bitcast ptr %30 to ptr
+  %wide.load53 = load <4 x i32>, ptr %31, align 4
   %32 = add <4 x i32> zeroinitializer, %vec.phi
   %33 = add <4 x i32> zeroinitializer, %vec.phi20
   %34 = add <4 x i32> %wide.load32, %vec.phi21

diff --git a/llvm/test/CodeGen/PowerPC/vsx-infl-copy2.ll b/llvm/test/CodeGen/PowerPC/vsx-infl-copy2.ll
index a38803f23adf5..04bbc2ef032a1 100644
--- a/llvm/test/CodeGen/PowerPC/vsx-infl-copy2.ll
+++ b/llvm/test/CodeGen/PowerPC/vsx-infl-copy2.ll
@@ -3,7 +3,7 @@ target datalayout = "E-m:e-i64:64-n32:64"
 target triple = "powerpc64-unknown-linux-gnu"
 
 ; Function Attrs: nounwind
-define void @_Z28test_goto_loop_unroll_factorILi22EiEvPKT0_iPKc(i32* nocapture readonly %first) #0 {
+define void @_Z28test_goto_loop_unroll_factorILi22EiEvPKT0_iPKc(ptr nocapture readonly %first) #0 {
 entry:
   br i1 false, label %loop2_start, label %if.end5
 
@@ -29,39 +29,39 @@ vector.body:                                      ; preds = %vector.body, %loop_
   %vec.phi70 = phi <4 x i32> [ %41, %vector.body ], [ zeroinitializer, %loop_start.preheader ]
   %vec.phi71 = phi <4 x i32> [ %42, %vector.body ], [ zeroinitializer, %loop_start.preheader ]
   %.sum = add i64 0, 4
-  %wide.load72 = load <4 x i32>, <4 x i32>* null, align 4
+  %wide.load72 = load <4 x i32>, ptr null, align 4
   %.sum109 = add i64 0, 8
-  %0 = getelementptr i32, i32* %first, i64 %.sum109
-  %1 = bitcast i32* %0 to <4 x i32>*
-  %wide.load73 = load <4 x i32>, <4 x i32>* %1, align 4
+  %0 = getelementptr i32, ptr %first, i64 %.sum109
+  %1 = bitcast ptr %0 to ptr
+  %wide.load73 = load <4 x i32>, ptr %1, align 4
   %.sum110 = add i64 0, 12
-  %2 = getelementptr i32, i32* %first, i64 %.sum110
-  %3 = bitcast i32* %2 to <4 x i32>*
-  %wide.load74 = load <4 x i32>, <4 x i32>* %3, align 4
+  %2 = getelementptr i32, ptr %first, i64 %.sum110
+  %3 = bitcast ptr %2 to ptr
+  %wide.load74 = load <4 x i32>, ptr %3, align 4
   %.sum112 = add i64 0, 20
-  %4 = getelementptr i32, i32* %first, i64 %.sum112
-  %5 = bitcast i32* %4 to <4 x i32>*
-  %wide.load76 = load <4 x i32>, <4 x i32>* %5, align 4
+  %4 = getelementptr i32, ptr %first, i64 %.sum112
+  %5 = bitcast ptr %4 to ptr
+  %wide.load76 = load <4 x i32>, ptr %5, align 4
   %.sum114 = add i64 0, 28
-  %6 = getelementptr i32, i32* %first, i64 %.sum114
-  %7 = bitcast i32* %6 to <4 x i32>*
-  %wide.load78 = load <4 x i32>, <4 x i32>* %7, align 4
+  %6 = getelementptr i32, ptr %first, i64 %.sum114
+  %7 = bitcast ptr %6 to ptr
+  %wide.load78 = load <4 x i32>, ptr %7, align 4
   %.sum115 = add i64 0, 32
-  %8 = getelementptr i32, i32* %first, i64 %.sum115
-  %9 = bitcast i32* %8 to <4 x i32>*
-  %wide.load79 = load <4 x i32>, <4 x i32>* %9, align 4
+  %8 = getelementptr i32, ptr %first, i64 %.sum115
+  %9 = bitcast ptr %8 to ptr
+  %wide.load79 = load <4 x i32>, ptr %9, align 4
   %.sum116 = add i64 0, 36
-  %10 = getelementptr i32, i32* %first, i64 %.sum116
-  %11 = bitcast i32* %10 to <4 x i32>*
-  %wide.load80 = load <4 x i32>, <4 x i32>* %11, align 4
+  %10 = getelementptr i32, ptr %first, i64 %.sum116
+  %11 = bitcast ptr %10 to ptr
+  %wide.load80 = load <4 x i32>, ptr %11, align 4
   %.sum117 = add i64 0, 40
-  %12 = getelementptr i32, i32* %first, i64 %.sum117
-  %13 = bitcast i32* %12 to <4 x i32>*
-  %wide.load81 = load <4 x i32>, <4 x i32>* %13, align 4
+  %12 = getelementptr i32, ptr %first, i64 %.sum117
+  %13 = bitcast ptr %12 to ptr
+  %wide.load81 = load <4 x i32>, ptr %13, align 4
   %.sum118 = add i64 0, 44
-  %14 = getelementptr i32, i32* %first, i64 %.sum118
-  %15 = bitcast i32* %14 to <4 x i32>*
-  %wide.load82 = load <4 x i32>, <4 x i32>* %15, align 4
+  %14 = getelementptr i32, ptr %first, i64 %.sum118
+  %15 = bitcast ptr %14 to ptr
+  %wide.load82 = load <4 x i32>, ptr %15, align 4
   %16 = mul <4 x i32> %wide.load72, <i32 269850533, i32 269850533, i32 269850533, i32 269850533>
   %17 = mul <4 x i32> %wide.load73, <i32 269850533, i32 269850533, i32 269850533, i32 269850533>
   %18 = mul <4 x i32> %wide.load74, <i32 269850533, i32 269850533, i32 269850533, i32 269850533>

diff --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32e.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32e.ll
index 01a47c18c7a48..0eb63913be889 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32e.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32e.ll
@@ -1861,7 +1861,7 @@ define i32 @caller_mixed_scalar_libcalls(i64 %a) {
 
 ; Check passing of coerced integer arrays
 
-%struct.small = type { i32, i32* }
+%struct.small = type { i32, ptr }
 
 define i32 @callee_small_coerced_struct([2 x i32] %a.coerce) {
 ; ILP32E-FPELIM-LABEL: callee_small_coerced_struct:
@@ -1973,7 +1973,7 @@ define i32 @caller_small_coerced_struct() {
 
 %struct.large = type { i32, i32, i32, i32 }
 
-define i32 @callee_large_struct(%struct.large* byval(%struct.large) align 4 %a) {
+define i32 @callee_large_struct(ptr byval(%struct.large) align 4 %a) {
 ; ILP32E-FPELIM-LABEL: callee_large_struct:
 ; ILP32E-FPELIM:       # %bb.0:
 ; ILP32E-FPELIM-NEXT:    lw a1, 0(a0)
@@ -2018,10 +2018,10 @@ define i32 @callee_large_struct(%struct.large* byval(%struct.large) align 4 %a)
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    lw a0, 12(a0)
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    add a0, a1, a0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
-  %1 = getelementptr inbounds %struct.large, %struct.large* %a, i32 0, i32 0
-  %2 = getelementptr inbounds %struct.large, %struct.large* %a, i32 0, i32 3
-  %3 = load i32, i32* %1
-  %4 = load i32, i32* %2
+  %1 = getelementptr inbounds %struct.large, ptr %a, i32 0, i32 0
+  %2 = getelementptr inbounds %struct.large, ptr %a, i32 0, i32 3
+  %3 = load i32, ptr %1
+  %4 = load i32, ptr %2
   %5 = add i32 %3, %4
   ret i32 %5
 }
@@ -2129,16 +2129,16 @@ define i32 @caller_large_struct() {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    addi sp, sp, 32
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %ls = alloca %struct.large, align 4
-  %1 = bitcast %struct.large* %ls to i8*
-  %a = getelementptr inbounds %struct.large, %struct.large* %ls, i32 0, i32 0
-  store i32 1, i32* %a
-  %b = getelementptr inbounds %struct.large, %struct.large* %ls, i32 0, i32 1
-  store i32 2, i32* %b
-  %c = getelementptr inbounds %struct.large, %struct.large* %ls, i32 0, i32 2
-  store i32 3, i32* %c
-  %d = getelementptr inbounds %struct.large, %struct.large* %ls, i32 0, i32 3
-  store i32 4, i32* %d
-  %2 = call i32 @callee_large_struct(%struct.large* byval(%struct.large) align 4 %ls)
+  %1 = bitcast ptr %ls to ptr
+  %a = getelementptr inbounds %struct.large, ptr %ls, i32 0, i32 0
+  store i32 1, ptr %a
+  %b = getelementptr inbounds %struct.large, ptr %ls, i32 0, i32 1
+  store i32 2, ptr %b
+  %c = getelementptr inbounds %struct.large, ptr %ls, i32 0, i32 2
+  store i32 3, ptr %c
+  %d = getelementptr inbounds %struct.large, ptr %ls, i32 0, i32 3
+  store i32 4, ptr %d
+  %2 = call i32 @callee_large_struct(ptr byval(%struct.large) align 4 %ls)
   ret i32 %2
 }
 
@@ -2185,7 +2185,7 @@ define %struct.small @callee_small_struct_ret() {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    li a0, 1
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    li a1, 0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
-  ret %struct.small { i32 1, i32* null }
+  ret %struct.small { i32 1, ptr null }
 }
 
 define i32 @caller_small_struct_ret() {
@@ -2241,7 +2241,7 @@ define i32 @caller_small_struct_ret() {
   %1 = call %struct.small @callee_small_struct_ret()
   %2 = extractvalue %struct.small %1, 0
   %3 = extractvalue %struct.small %1, 1
-  %4 = ptrtoint i32* %3 to i32
+  %4 = ptrtoint ptr %3 to i32
   %5 = add i32 %2, %4
   ret i32 %5
 }
@@ -2380,7 +2380,7 @@ define void @caller_large_scalar_ret() {
 
 ; Check return of >2x xlen structs
 
-define void @callee_large_struct_ret(%struct.large* noalias sret(%struct.large) %agg.result) {
+define void @callee_large_struct_ret(ptr noalias sret(%struct.large) %agg.result) {
 ; ILP32E-FPELIM-LABEL: callee_large_struct_ret:
 ; ILP32E-FPELIM:       # %bb.0:
 ; ILP32E-FPELIM-NEXT:    li a1, 1
@@ -2445,14 +2445,14 @@ define void @callee_large_struct_ret(%struct.large* noalias sret(%struct.large)
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    li a1, 4
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    sw a1, 12(a0)
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
-  %a = getelementptr inbounds %struct.large, %struct.large* %agg.result, i32 0, i32 0
-  store i32 1, i32* %a, align 4
-  %b = getelementptr inbounds %struct.large, %struct.large* %agg.result, i32 0, i32 1
-  store i32 2, i32* %b, align 4
-  %c = getelementptr inbounds %struct.large, %struct.large* %agg.result, i32 0, i32 2
-  store i32 3, i32* %c, align 4
-  %d = getelementptr inbounds %struct.large, %struct.large* %agg.result, i32 0, i32 3
-  store i32 4, i32* %d, align 4
+  %a = getelementptr inbounds %struct.large, ptr %agg.result, i32 0, i32 0
+  store i32 1, ptr %a, align 4
+  %b = getelementptr inbounds %struct.large, ptr %agg.result, i32 0, i32 1
+  store i32 2, ptr %b, align 4
+  %c = getelementptr inbounds %struct.large, ptr %agg.result, i32 0, i32 2
+  store i32 3, ptr %c, align 4
+  %d = getelementptr inbounds %struct.large, ptr %agg.result, i32 0, i32 3
+  store i32 4, ptr %d, align 4
   ret void
 }
 
@@ -2539,11 +2539,11 @@ define i32 @caller_large_struct_ret() {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    addi sp, sp, 16
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %1 = alloca %struct.large
-  call void @callee_large_struct_ret(%struct.large* sret(%struct.large) %1)
-  %2 = getelementptr inbounds %struct.large, %struct.large* %1, i32 0, i32 0
-  %3 = load i32, i32* %2
-  %4 = getelementptr inbounds %struct.large, %struct.large* %1, i32 0, i32 3
-  %5 = load i32, i32* %4
+  call void @callee_large_struct_ret(ptr sret(%struct.large) %1)
+  %2 = getelementptr inbounds %struct.large, ptr %1, i32 0, i32 0
+  %3 = load i32, ptr %2
+  %4 = getelementptr inbounds %struct.large, ptr %1, i32 0, i32 3
+  %5 = load i32, ptr %4
   %6 = add i32 %3, %5
   ret i32 %6
 }

diff --git a/llvm/test/CodeGen/RISCV/copy-frameindex.mir b/llvm/test/CodeGen/RISCV/copy-frameindex.mir
index f220751daad4c..31ffc3f0f83c6 100644
--- a/llvm/test/CodeGen/RISCV/copy-frameindex.mir
+++ b/llvm/test/CodeGen/RISCV/copy-frameindex.mir
@@ -5,11 +5,11 @@
   define void @sink_addi_fi(i32 %0) !dbg !5 {
   bb.0:
     %1 = alloca i32, align 4
-    call void @llvm.dbg.value(metadata i32* %1, metadata !1, metadata !DIExpression()), !dbg !3
+    call void @llvm.dbg.value(metadata ptr %1, metadata !1, metadata !DIExpression()), !dbg !3
     %2 = icmp eq i32 %0, 0
     br i1 %2, label %bb.2, label %bb.1
   bb.1:
-    store volatile i32 0, i32* %1, align 4
+    store volatile i32 0, ptr %1, align 4
     br label %bb.2
   bb.2:
     ret void

diff --git a/llvm/test/CodeGen/RISCV/copyprop.ll b/llvm/test/CodeGen/RISCV/copyprop.ll
index 5726a35352788..ddf58403382c5 100644
--- a/llvm/test/CodeGen/RISCV/copyprop.ll
+++ b/llvm/test/CodeGen/RISCV/copyprop.ll
@@ -2,7 +2,7 @@
 ; RUN: llc < %s -O3 -mtriple=riscv64 -riscv-enable-copy-propagation=false | FileCheck %s --check-prefix=NOPROP
 ; RUN: llc < %s -O3 -mtriple=riscv64 -riscv-enable-copy-propagation=true | FileCheck %s --check-prefix=PROP
 
-define void @copyprop_after_mbp(i32 %v, i32* %a, i32* %b, i32* %c, i32* %d) {
+define void @copyprop_after_mbp(i32 %v, ptr %a, ptr %b, ptr %c, ptr %d) {
 ; NOPROP-LABEL: copyprop_after_mbp:
 ; NOPROP:       # %bb.0:
 ; NOPROP-NEXT:    sext.w a0, a0
@@ -49,16 +49,16 @@ define void @copyprop_after_mbp(i32 %v, i32* %a, i32* %b, i32* %c, i32* %d) {
   br i1 %1, label %bb.0, label %bb.1
 
 bb.0:
-  store i32 15, i32* %b, align 4
+  store i32 15, ptr %b, align 4
   br label %bb.2
 
 bb.1:
-  store i32 25, i32* %c, align 4
+  store i32 25, ptr %c, align 4
   br label %bb.2
 
 bb.2:
   %2 = phi i32 [ 1, %bb.0 ], [ 0, %bb.1 ]
-  store i32 %2, i32* %a, align 4
-  store i32 12, i32* %d, align 4
+  store i32 %2, ptr %a, align 4
+  store i32 12, ptr %d, align 4
   ret void
 }

diff --git a/llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll b/llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll
index a60fd26f49594..adf614435b31d 100644
--- a/llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll
+++ b/llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll
@@ -7,7 +7,7 @@
 
 @global_x = global i32 0, align 4
 
-define signext i32 @ctz_dereferencing_pointer(i64* %b) nounwind {
+define signext i32 @ctz_dereferencing_pointer(ptr %b) nounwind {
 ; RV64ZBB-LABEL: ctz_dereferencing_pointer:
 ; RV64ZBB:       # %bb.0: # %entry
 ; RV64ZBB-NEXT:    ld a0, 0(a0)
@@ -110,7 +110,7 @@ define signext i32 @ctz_dereferencing_pointer(i64* %b) nounwind {
 
 
 entry:
-  %0 = load i64, i64* %b, align 8
+  %0 = load i64, ptr %b, align 8
   %1 = tail call i64 @llvm.cttz.i64(i64 %0, i1 true)
   %2 = icmp eq i64 %0, 0
   %3 = trunc i64 %1 to i32
@@ -118,7 +118,7 @@ entry:
   ret i32 %4
 }
 
-define i64 @ctz_dereferencing_pointer_zext(i32* %b) nounwind {
+define i64 @ctz_dereferencing_pointer_zext(ptr %b) nounwind {
 ; RV64ZBB-LABEL: ctz_dereferencing_pointer_zext:
 ; RV64ZBB:       # %bb.0: # %entry
 ; RV64ZBB-NEXT:    lw a0, 0(a0)
@@ -189,7 +189,7 @@ define i64 @ctz_dereferencing_pointer_zext(i32* %b) nounwind {
 
 
 entry:
-  %0 = load i32, i32* %b, align 8
+  %0 = load i32, ptr %b, align 8
   %1 = tail call i32 @llvm.cttz.i32(i32 %0, i1 true)
   %2 = icmp eq i32 %0, 0
   %3 = zext i32 %1 to i64
@@ -968,7 +968,7 @@ define signext i32 @globalVar() nounwind {
 
 
 entry:
-  %0 = load i32, i32* @global_x, align 4
+  %0 = load i32, ptr @global_x, align 4
   %1 = tail call i32 @llvm.cttz.i32(i32 %0, i1 true)
   %2 = icmp eq i32 %0, 0
   %conv = select i1 %2, i32 0, i32 %1

diff --git a/llvm/test/CodeGen/RISCV/fli-licm.ll b/llvm/test/CodeGen/RISCV/fli-licm.ll
index ba6b33c4f0a46..d7488774b41a5 100644
--- a/llvm/test/CodeGen/RISCV/fli-licm.ll
+++ b/llvm/test/CodeGen/RISCV/fli-licm.ll
@@ -7,7 +7,7 @@
 ; The purpose of this test is to check that an FLI instruction that
 ; materializes an immediate is not MachineLICM'd out of a loop.
 
-%struct.Node = type { ptr, i8* }
+%struct.Node = type { ptr, ptr }
 
 define void @process_nodes(ptr %0) nounwind {
 ; RV32-LABEL: process_nodes:
@@ -58,7 +58,7 @@ entry:
   br i1 %1, label %exit, label %loop
 
 loop:
-  %2 = phi %struct.Node* [ %4, %loop ], [ %0, %entry ]
+  %2 = phi ptr [ %4, %loop ], [ %0, %entry ]
   tail call void @do_it(float 1.000000e+00, ptr nonnull %2)
   %3 = getelementptr inbounds %struct.Node, ptr %2, i64 0, i32 0
   %4 = load ptr, ptr %3, align 8

diff --git a/llvm/test/CodeGen/RISCV/live-sp.mir b/llvm/test/CodeGen/RISCV/live-sp.mir
index df72b1ddfebc5..8dd307f521f5b 100644
--- a/llvm/test/CodeGen/RISCV/live-sp.mir
+++ b/llvm/test/CodeGen/RISCV/live-sp.mir
@@ -8,17 +8,17 @@
   define void @test1() {
   entry:
     %a = alloca i32, align 4
-    %0 = call i8* @llvm.returnaddress(i32 0)
-    %1 = ptrtoint i8* %0 to i64
+    %0 = call ptr @llvm.returnaddress(i32 0)
+    %1 = ptrtoint ptr %0 to i64
     %conv = trunc i64 %1 to i32
-    store i32 %conv, i32* %a, align 4
-    %2 = load i32, i32* %a, align 4
+    store i32 %conv, ptr %a, align 4
+    %2 = load i32, ptr %a, align 4
     call void (i32, ...) @vararg(i32 signext 0, i32 signext %2)
     ret void
   }
 
   ; Function Attrs: nofree nosync nounwind readnone willreturn
-  declare i8* @llvm.returnaddress(i32 immarg) #0
+  declare ptr @llvm.returnaddress(i32 immarg) #0
 
   attributes #0 = { nofree nosync nounwind readnone willreturn }
 

diff --git a/llvm/test/CodeGen/RISCV/make-compressible-rv64.mir b/llvm/test/CodeGen/RISCV/make-compressible-rv64.mir
index bfe8133162415..ed740d545c1da 100644
--- a/llvm/test/CodeGen/RISCV/make-compressible-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/make-compressible-rv64.mir
@@ -3,98 +3,98 @@
 # RUN:   -run-pass=riscv-make-compressible | FileCheck %s
 --- |
 
-  define void @store_common_value(i64* %a, i64* %b, i64* %c) #0 {
+  define void @store_common_value(ptr %a, ptr %b, ptr %c) #0 {
   entry:
-    store i64 0, i64* %a, align 8
-    store i64 0, i64* %b, align 8
-    store i64 0, i64* %c, align 8
+    store i64 0, ptr %a, align 8
+    store i64 0, ptr %b, align 8
+    store i64 0, ptr %c, align 8
     ret void
   }
 
-  define void @store_common_ptr(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e, i64 %f, i64* %p) #0 {
+  define void @store_common_ptr(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e, i64 %f, ptr %p) #0 {
   entry:
-    store volatile i64 1, i64* %p, align 8
-    store volatile i64 3, i64* %p, align 8
-    store volatile i64 5, i64* %p, align 8
+    store volatile i64 1, ptr %p, align 8
+    store volatile i64 3, ptr %p, align 8
+    store volatile i64 5, ptr %p, align 8
     ret void
   }
 
-  define void @store_common_ptr_self(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e, i64 %f, i64* %p) #0 {
+  define void @store_common_ptr_self(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e, i64 %f, ptr %p) #0 {
   entry:
-    %q = bitcast i64* %p to i64**
-    store volatile i64 1, i64* %p, align 8
-    store volatile i64 3, i64* %p, align 8
-    store volatile i64* %p, i64** %q, align 8
+    %q = bitcast ptr %p to ptr
+    store volatile i64 1, ptr %p, align 8
+    store volatile i64 3, ptr %p, align 8
+    store volatile ptr %p, ptr %q, align 8
     ret void
   }
 
-  define void @load_common_ptr(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e, i64 %f, i64* %p) #0 {
+  define void @load_common_ptr(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e, i64 %f, ptr %p) #0 {
   entry:
-    %g = load volatile i64, i64* %p, align 8
-    %h = load volatile i64, i64* %p, align 8
-    %i = load volatile i64, i64* %p, align 8
+    %g = load volatile i64, ptr %p, align 8
+    %h = load volatile i64, ptr %p, align 8
+    %i = load volatile i64, ptr %p, align 8
     ret void
   }
 
-  define void @store_large_offset(i64* %p) #0 {
+  define void @store_large_offset(ptr %p) #0 {
   entry:
-    %0 = getelementptr inbounds i64, i64* %p, i64 100
-    store volatile i64 1, i64* %0, align 8
-    %1 = getelementptr inbounds i64, i64* %p, i64 101
-    store volatile i64 3, i64* %1, align 8
-    %2 = getelementptr inbounds i64, i64* %p, i64 102
-    store volatile i64 5, i64* %2, align 8
-    %3 = getelementptr inbounds i64, i64* %p, i64 103
-    store volatile i64 7, i64* %3, align 8
+    %0 = getelementptr inbounds i64, ptr %p, i64 100
+    store volatile i64 1, ptr %0, align 8
+    %1 = getelementptr inbounds i64, ptr %p, i64 101
+    store volatile i64 3, ptr %1, align 8
+    %2 = getelementptr inbounds i64, ptr %p, i64 102
+    store volatile i64 5, ptr %2, align 8
+    %3 = getelementptr inbounds i64, ptr %p, i64 103
+    store volatile i64 7, ptr %3, align 8
     ret void
   }
 
-  define void @load_large_offset(i64* %p) #0 {
+  define void @load_large_offset(ptr %p) #0 {
   entry:
-    %0 = getelementptr inbounds i64, i64* %p, i64 100
-    %a = load volatile i64, i64* %0, align 8
-    %1 = getelementptr inbounds i64, i64* %p, i64 101
-    %b = load volatile i64, i64* %1, align 8
-    %2 = getelementptr inbounds i64, i64* %p, i64 102
-    %c = load volatile i64, i64* %2, align 8
-    %3 = getelementptr inbounds i64, i64* %p, i64 103
-    %d = load volatile i64, i64* %3, align 8
+    %0 = getelementptr inbounds i64, ptr %p, i64 100
+    %a = load volatile i64, ptr %0, align 8
+    %1 = getelementptr inbounds i64, ptr %p, i64 101
+    %b = load volatile i64, ptr %1, align 8
+    %2 = getelementptr inbounds i64, ptr %p, i64 102
+    %c = load volatile i64, ptr %2, align 8
+    %3 = getelementptr inbounds i64, ptr %p, i64 103
+    %d = load volatile i64, ptr %3, align 8
     ret void
   }
 
-  define void @store_common_value_no_opt(i64* %a) #0 {
+  define void @store_common_value_no_opt(ptr %a) #0 {
   entry:
-    store i64 0, i64* %a, align 8
+    store i64 0, ptr %a, align 8
     ret void
   }
 
-  define void @store_common_ptr_no_opt(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e, i64 %f, i64* %p) #0 {
+  define void @store_common_ptr_no_opt(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e, i64 %f, ptr %p) #0 {
   entry:
-    store volatile i64 1, i64* %p, align 8
+    store volatile i64 1, ptr %p, align 8
     ret void
   }
 
-  define void @load_common_ptr_no_opt(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e, i64 %f, i64* %p) #0 {
+  define void @load_common_ptr_no_opt(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e, i64 %f, ptr %p) #0 {
   entry:
-    %g = load volatile i64, i64* %p, align 8
+    %g = load volatile i64, ptr %p, align 8
     ret void
   }
 
-  define void @store_large_offset_no_opt(i64* %p) #0 {
+  define void @store_large_offset_no_opt(ptr %p) #0 {
   entry:
-    %0 = getelementptr inbounds i64, i64* %p, i64 100
-    store volatile i64 1, i64* %0, align 8
-    %1 = getelementptr inbounds i64, i64* %p, i64 101
-    store volatile i64 3, i64* %1, align 8
+    %0 = getelementptr inbounds i64, ptr %p, i64 100
+    store volatile i64 1, ptr %0, align 8
+    %1 = getelementptr inbounds i64, ptr %p, i64 101
+    store volatile i64 3, ptr %1, align 8
     ret void
   }
 
-  define void @load_large_offset_no_opt(i64* %p) #0 {
+  define void @load_large_offset_no_opt(ptr %p) #0 {
   entry:
-    %0 = getelementptr inbounds i64, i64* %p, i64 100
-    %a = load volatile i64, i64* %0, align 8
-    %1 = getelementptr inbounds i64, i64* %p, i64 101
-    %b = load volatile i64, i64* %1, align 8
+    %0 = getelementptr inbounds i64, ptr %p, i64 100
+    %a = load volatile i64, ptr %0, align 8
+    %1 = getelementptr inbounds i64, ptr %p, i64 101
+    %b = load volatile i64, ptr %1, align 8
     ret void
   }
 

diff --git a/llvm/test/CodeGen/RISCV/make-compressible.mir b/llvm/test/CodeGen/RISCV/make-compressible.mir
index 91c2d95b5051d..2105a13bc8c7b 100644
--- a/llvm/test/CodeGen/RISCV/make-compressible.mir
+++ b/llvm/test/CodeGen/RISCV/make-compressible.mir
@@ -5,284 +5,284 @@
 # RUN:   -run-pass=riscv-make-compressible | FileCheck --check-prefix=RV64 %s
 --- |
 
-  define void @store_common_value(i32* %a, i32* %b, i32* %c) #0 {
+  define void @store_common_value(ptr %a, ptr %b, ptr %c) #0 {
   entry:
-    store i32 0, i32* %a, align 4
-    store i32 0, i32* %b, align 4
-    store i32 0, i32* %c, align 4
+    store i32 0, ptr %a, align 4
+    store i32 0, ptr %b, align 4
+    store i32 0, ptr %c, align 4
     ret void
   }
 
-  define void @store_common_value_float(float* %a, float* %b, float* %c, float %d, float %e, float %f, float %g, float %h, float %i, float %j) #0 {
+  define void @store_common_value_float(ptr %a, ptr %b, ptr %c, float %d, float %e, float %f, float %g, float %h, float %i, float %j) #0 {
   entry:
-    store float %j, float* %a, align 4
-    store float %j, float* %b, align 4
-    store float %j, float* %c, align 4
+    store float %j, ptr %a, align 4
+    store float %j, ptr %b, align 4
+    store float %j, ptr %c, align 4
     ret void
   }
 
-  define void @store_common_value_double(double* %a, double* %b, double* %c, double %d, double %e, double %f, double %g, double %h, double %i, double %j) #0 {
+  define void @store_common_value_double(ptr %a, ptr %b, ptr %c, double %d, double %e, double %f, double %g, double %h, double %i, double %j) #0 {
   entry:
-    store double %j, double* %a, align 8
-    store double %j, double* %b, align 8
-    store double %j, double* %c, align 8
+    store double %j, ptr %a, align 8
+    store double %j, ptr %b, align 8
+    store double %j, ptr %c, align 8
     ret void
   }
 
-  define void @store_common_ptr(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32* %p) #0 {
+  define void @store_common_ptr(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, ptr %p) #0 {
   entry:
-    store volatile i32 1, i32* %p, align 4
-    store volatile i32 3, i32* %p, align 4
-    store volatile i32 5, i32* %p, align 4
+    store volatile i32 1, ptr %p, align 4
+    store volatile i32 3, ptr %p, align 4
+    store volatile i32 5, ptr %p, align 4
     ret void
   }
 
-  define void @store_common_ptr_self(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32* %p) #0 {
+  define void @store_common_ptr_self(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, ptr %p) #0 {
   entry:
-    %q = bitcast i32* %p to i32**
-    store volatile i32 1, i32* %p, align 4
-    store volatile i32 3, i32* %p, align 4
-    store volatile i32* %p, i32** %q, align 4
+    %q = bitcast ptr %p to ptr
+    store volatile i32 1, ptr %p, align 4
+    store volatile i32 3, ptr %p, align 4
+    store volatile ptr %p, ptr %q, align 4
     ret void
   }
 
-  define void @store_common_ptr_float(float %a, float %b, float %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h, i32 %i, float* %p) #0 {
+  define void @store_common_ptr_float(float %a, float %b, float %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h, i32 %i, ptr %p) #0 {
   entry:
-    store volatile float %a, float* %p, align 4
-    store volatile float %b, float* %p, align 4
-    store volatile float %c, float* %p, align 4
+    store volatile float %a, ptr %p, align 4
+    store volatile float %b, ptr %p, align 4
+    store volatile float %c, ptr %p, align 4
     ret void
   }
 
-  define void @store_common_ptr_double(double %a, double %b, double %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h, i32 %i, double* %p) #0 {
+  define void @store_common_ptr_double(double %a, double %b, double %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h, i32 %i, ptr %p) #0 {
   entry:
-    store volatile double %a, double* %p, align 8
-    store volatile double %b, double* %p, align 8
-    store volatile double %c, double* %p, align 8
+    store volatile double %a, ptr %p, align 8
+    store volatile double %b, ptr %p, align 8
+    store volatile double %c, ptr %p, align 8
     ret void
   }
 
-  define void @load_common_ptr(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32* %p) #0 {
+  define void @load_common_ptr(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, ptr %p) #0 {
   entry:
-    %g = load volatile i32, i32* %p, align 4
-    %h = load volatile i32, i32* %p, align 4
-    %i = load volatile i32, i32* %p, align 4
+    %g = load volatile i32, ptr %p, align 4
+    %h = load volatile i32, ptr %p, align 4
+    %i = load volatile i32, ptr %p, align 4
     ret void
   }
 
-  define void @load_common_ptr_float(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, float* %g) #0 {
+  define void @load_common_ptr_float(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, ptr %g) #0 {
   entry:
-    %0 = load float, float* %g, align 4
-    %arrayidx1 = getelementptr inbounds float, float* %g, i32 1
-    %1 = load float, float* %arrayidx1, align 4
-    %arrayidx2 = getelementptr inbounds float, float* %g, i32 2
-    %2 = load float, float* %arrayidx2, align 4
+    %0 = load float, ptr %g, align 4
+    %arrayidx1 = getelementptr inbounds float, ptr %g, i32 1
+    %1 = load float, ptr %arrayidx1, align 4
+    %arrayidx2 = getelementptr inbounds float, ptr %g, i32 2
+    %2 = load float, ptr %arrayidx2, align 4
     tail call void @load_common_ptr_float_1(float %0, float %1, float %2)
     ret void
   }
 
   declare void @load_common_ptr_float_1(float, float, float) #0
 
-  define void @load_common_ptr_double(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, double* %g) #0 {
+  define void @load_common_ptr_double(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, ptr %g) #0 {
   entry:
-    %0 = load double, double* %g, align 8
-    %arrayidx1 = getelementptr inbounds double, double* %g, i32 1
-    %1 = load double, double* %arrayidx1, align 8
-    %arrayidx2 = getelementptr inbounds double, double* %g, i32 2
-    %2 = load double, double* %arrayidx2, align 8
+    %0 = load double, ptr %g, align 8
+    %arrayidx1 = getelementptr inbounds double, ptr %g, i32 1
+    %1 = load double, ptr %arrayidx1, align 8
+    %arrayidx2 = getelementptr inbounds double, ptr %g, i32 2
+    %2 = load double, ptr %arrayidx2, align 8
     tail call void @load_common_ptr_double_1(double %0, double %1, double %2)
     ret void
   }
 
   declare void @load_common_ptr_double_1(double, double, double) #0
 
-  define void @store_large_offset(i32* %p) #0 {
+  define void @store_large_offset(ptr %p) #0 {
   entry:
-    %0 = getelementptr inbounds i32, i32* %p, i32 100
-    store volatile i32 1, i32* %0, align 4
-    %1 = getelementptr inbounds i32, i32* %p, i32 101
-    store volatile i32 3, i32* %1, align 4
-    %2 = getelementptr inbounds i32, i32* %p, i32 102
-    store volatile i32 5, i32* %2, align 4
-    %3 = getelementptr inbounds i32, i32* %p, i32 103
-    store volatile i32 7, i32* %3, align 4
+    %0 = getelementptr inbounds i32, ptr %p, i32 100
+    store volatile i32 1, ptr %0, align 4
+    %1 = getelementptr inbounds i32, ptr %p, i32 101
+    store volatile i32 3, ptr %1, align 4
+    %2 = getelementptr inbounds i32, ptr %p, i32 102
+    store volatile i32 5, ptr %2, align 4
+    %3 = getelementptr inbounds i32, ptr %p, i32 103
+    store volatile i32 7, ptr %3, align 4
     ret void
   }
 
-  define void @store_large_offset_float(float* %p, float %a, float %b, float %c, float %d) #0 {
+  define void @store_large_offset_float(ptr %p, float %a, float %b, float %c, float %d) #0 {
   entry:
-    %0 = getelementptr inbounds float, float* %p, i32 100
-    store volatile float %a, float* %0, align 4
-    %1 = getelementptr inbounds float, float* %p, i32 101
-    store volatile float %b, float* %1, align 4
-    %2 = getelementptr inbounds float, float* %p, i32 102
-    store volatile float %c, float* %2, align 4
-    %3 = getelementptr inbounds float, float* %p, i32 103
-    store volatile float %d, float* %3, align 4
+    %0 = getelementptr inbounds float, ptr %p, i32 100
+    store volatile float %a, ptr %0, align 4
+    %1 = getelementptr inbounds float, ptr %p, i32 101
+    store volatile float %b, ptr %1, align 4
+    %2 = getelementptr inbounds float, ptr %p, i32 102
+    store volatile float %c, ptr %2, align 4
+    %3 = getelementptr inbounds float, ptr %p, i32 103
+    store volatile float %d, ptr %3, align 4
     ret void
   }
 
-  define void @store_large_offset_double(double* %p, double %a, double %b, double %c, double %d) #0 {
+  define void @store_large_offset_double(ptr %p, double %a, double %b, double %c, double %d) #0 {
   entry:
-    %0 = getelementptr inbounds double, double* %p, i32 100
-    store volatile double %a, double* %0, align 8
-    %1 = getelementptr inbounds double, double* %p, i32 101
-    store volatile double %b, double* %1, align 8
-    %2 = getelementptr inbounds double, double* %p, i32 102
-    store volatile double %c, double* %2, align 8
-    %3 = getelementptr inbounds double, double* %p, i32 103
-    store volatile double %d, double* %3, align 8
+    %0 = getelementptr inbounds double, ptr %p, i32 100
+    store volatile double %a, ptr %0, align 8
+    %1 = getelementptr inbounds double, ptr %p, i32 101
+    store volatile double %b, ptr %1, align 8
+    %2 = getelementptr inbounds double, ptr %p, i32 102
+    store volatile double %c, ptr %2, align 8
+    %3 = getelementptr inbounds double, ptr %p, i32 103
+    store volatile double %d, ptr %3, align 8
     ret void
   }
 
-  define void @load_large_offset(i32* %p) #0 {
+  define void @load_large_offset(ptr %p) #0 {
   entry:
-    %0 = getelementptr inbounds i32, i32* %p, i32 100
-    %a = load volatile i32, i32* %0, align 4
-    %1 = getelementptr inbounds i32, i32* %p, i32 101
-    %b = load volatile i32, i32* %1, align 4
-    %2 = getelementptr inbounds i32, i32* %p, i32 102
-    %c = load volatile i32, i32* %2, align 4
-    %3 = getelementptr inbounds i32, i32* %p, i32 103
-    %d = load volatile i32, i32* %3, align 4
+    %0 = getelementptr inbounds i32, ptr %p, i32 100
+    %a = load volatile i32, ptr %0, align 4
+    %1 = getelementptr inbounds i32, ptr %p, i32 101
+    %b = load volatile i32, ptr %1, align 4
+    %2 = getelementptr inbounds i32, ptr %p, i32 102
+    %c = load volatile i32, ptr %2, align 4
+    %3 = getelementptr inbounds i32, ptr %p, i32 103
+    %d = load volatile i32, ptr %3, align 4
     ret void
   }
 
-  define void @load_large_offset_float(float* %p) #0 {
+  define void @load_large_offset_float(ptr %p) #0 {
   entry:
-    %arrayidx = getelementptr inbounds float, float* %p, i32 100
-    %0 = load float, float* %arrayidx, align 4
-    %arrayidx1 = getelementptr inbounds float, float* %p, i32 101
-    %1 = load float, float* %arrayidx1, align 4
-    %arrayidx2 = getelementptr inbounds float, float* %p, i32 102
-    %2 = load float, float* %arrayidx2, align 4
+    %arrayidx = getelementptr inbounds float, ptr %p, i32 100
+    %0 = load float, ptr %arrayidx, align 4
+    %arrayidx1 = getelementptr inbounds float, ptr %p, i32 101
+    %1 = load float, ptr %arrayidx1, align 4
+    %arrayidx2 = getelementptr inbounds float, ptr %p, i32 102
+    %2 = load float, ptr %arrayidx2, align 4
     tail call void @load_large_offset_float_1(float %0, float %1, float %2)
     ret void
   }
 
   declare void @load_large_offset_float_1(float, float, float) #0
 
-  define void @load_large_offset_double(double* %p) #0 {
+  define void @load_large_offset_double(ptr %p) #0 {
   entry:
-    %arrayidx = getelementptr inbounds double, double* %p, i32 100
-    %0 = load double, double* %arrayidx, align 8
-    %arrayidx1 = getelementptr inbounds double, double* %p, i32 101
-    %1 = load double, double* %arrayidx1, align 8
-    %arrayidx2 = getelementptr inbounds double, double* %p, i32 102
-    %2 = load double, double* %arrayidx2, align 8
+    %arrayidx = getelementptr inbounds double, ptr %p, i32 100
+    %0 = load double, ptr %arrayidx, align 8
+    %arrayidx1 = getelementptr inbounds double, ptr %p, i32 101
+    %1 = load double, ptr %arrayidx1, align 8
+    %arrayidx2 = getelementptr inbounds double, ptr %p, i32 102
+    %2 = load double, ptr %arrayidx2, align 8
     tail call void @load_large_offset_double_1(double %0, double %1, double %2)
     ret void
   }
 
   declare void @load_large_offset_double_1(double, double, double) #0
 
-  define void @store_common_value_no_opt(i32* %a) #0 {
+  define void @store_common_value_no_opt(ptr %a) #0 {
   entry:
-    store i32 0, i32* %a, align 4
+    store i32 0, ptr %a, align 4
     ret void
   }
 
-  define void @store_common_value_float_no_opt(float* %a, float %b, float %c, float %d, float %e, float %f, float %g, float %h) #0 {
+  define void @store_common_value_float_no_opt(ptr %a, float %b, float %c, float %d, float %e, float %f, float %g, float %h) #0 {
   entry:
-    store float %h, float* %a, align 4
+    store float %h, ptr %a, align 4
     ret void
   }
 
-  define void @store_common_value_double_no_opt(double* %a, double %b, double %c, double %d, double %e, double %f, double %g, double %h) #0 {
+  define void @store_common_value_double_no_opt(ptr %a, double %b, double %c, double %d, double %e, double %f, double %g, double %h) #0 {
   entry:
-    store double %h, double* %a, align 8
+    store double %h, ptr %a, align 8
     ret void
   }
 
-  define void @store_common_ptr_no_opt(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32* %p) #0 {
+  define void @store_common_ptr_no_opt(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, ptr %p) #0 {
   entry:
-    store volatile i32 1, i32* %p, align 4
+    store volatile i32 1, ptr %p, align 4
     ret void
   }
 
-  define void @store_common_ptr_float_no_opt(float %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, float* %p) #0 {
+  define void @store_common_ptr_float_no_opt(float %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, ptr %p) #0 {
   entry:
-    store volatile float %a, float* %p, align 4
+    store volatile float %a, ptr %p, align 4
     ret void
   }
 
-  define void @store_common_ptr_double_no_opt(double %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, double* %p) #0 {
+  define void @store_common_ptr_double_no_opt(double %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, ptr %p) #0 {
   entry:
-    store volatile double %a, double* %p, align 8
+    store volatile double %a, ptr %p, align 8
     ret void
   }
 
-  define void @load_common_ptr_no_opt(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32* %p) #0 {
+  define void @load_common_ptr_no_opt(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, ptr %p) #0 {
   entry:
-    %g = load volatile i32, i32* %p, align 4
+    %g = load volatile i32, ptr %p, align 4
     ret void
   }
 
-  define float @load_common_ptr_float_no_opt(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, float* %g) #0 {
+  define float @load_common_ptr_float_no_opt(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, ptr %g) #0 {
   entry:
-    %0 = load float, float* %g, align 4
+    %0 = load float, ptr %g, align 4
     ret float %0
   }
 
-  define double @load_common_ptr_double_no_opt(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, double* %g) #0 {
+  define double @load_common_ptr_double_no_opt(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, ptr %g) #0 {
   entry:
-    %0 = load double, double* %g, align 8
+    %0 = load double, ptr %g, align 8
     ret double %0
   }
 
-  define void @store_large_offset_no_opt(i32* %p) #0 {
+  define void @store_large_offset_no_opt(ptr %p) #0 {
   entry:
-    %0 = getelementptr inbounds i32, i32* %p, i32 100
-    store volatile i32 1, i32* %0, align 4
-    %1 = getelementptr inbounds i32, i32* %p, i32 101
-    store volatile i32 3, i32* %1, align 4
+    %0 = getelementptr inbounds i32, ptr %p, i32 100
+    store volatile i32 1, ptr %0, align 4
+    %1 = getelementptr inbounds i32, ptr %p, i32 101
+    store volatile i32 3, ptr %1, align 4
     ret void
   }
 
-  define void @store_large_offset_float_no_opt(float* %p, float %a, float %b) #0 {
+  define void @store_large_offset_float_no_opt(ptr %p, float %a, float %b) #0 {
   entry:
-    %0 = getelementptr inbounds float, float* %p, i32 100
-    store volatile float %a, float* %0, align 4
-    %1 = getelementptr inbounds float, float* %p, i32 101
-    store volatile float %b, float* %1, align 4
+    %0 = getelementptr inbounds float, ptr %p, i32 100
+    store volatile float %a, ptr %0, align 4
+    %1 = getelementptr inbounds float, ptr %p, i32 101
+    store volatile float %b, ptr %1, align 4
     ret void
   }
 
-  define void @store_large_offset_double_no_opt(double* %p, double %a, double %b) #0 {
+  define void @store_large_offset_double_no_opt(ptr %p, double %a, double %b) #0 {
   entry:
-    %0 = getelementptr inbounds double, double* %p, i32 100
-    store volatile double %a, double* %0, align 8
-    %1 = getelementptr inbounds double, double* %p, i32 101
-    store volatile double %b, double* %1, align 8
+    %0 = getelementptr inbounds double, ptr %p, i32 100
+    store volatile double %a, ptr %0, align 8
+    %1 = getelementptr inbounds double, ptr %p, i32 101
+    store volatile double %b, ptr %1, align 8
     ret void
   }
 
-  define void @load_large_offset_no_opt(i32* %p) #0 {
+  define void @load_large_offset_no_opt(ptr %p) #0 {
   entry:
-    %0 = getelementptr inbounds i32, i32* %p, i32 100
-    %a = load volatile i32, i32* %0, align 4
-    %1 = getelementptr inbounds i32, i32* %p, i32 101
-    %b = load volatile i32, i32* %1, align 4
+    %0 = getelementptr inbounds i32, ptr %p, i32 100
+    %a = load volatile i32, ptr %0, align 4
+    %1 = getelementptr inbounds i32, ptr %p, i32 101
+    %b = load volatile i32, ptr %1, align 4
     ret void
   }
 
-  define { float, float } @load_large_offset_float_no_opt(float* %p) #0 {
+  define { float, float } @load_large_offset_float_no_opt(ptr %p) #0 {
   entry:
-    %arrayidx = getelementptr inbounds float, float* %p, i32 100
-    %0 = load float, float* %arrayidx, align 4
-    %arrayidx1 = getelementptr inbounds float, float* %p, i32 101
-    %1 = load float, float* %arrayidx1, align 4
+    %arrayidx = getelementptr inbounds float, ptr %p, i32 100
+    %0 = load float, ptr %arrayidx, align 4
+    %arrayidx1 = getelementptr inbounds float, ptr %p, i32 101
+    %1 = load float, ptr %arrayidx1, align 4
     %2 = insertvalue { float, float } undef, float %0, 0
     %3 = insertvalue { float, float } %2, float %1, 1
     ret { float, float } %3
   }
 
-  define { double, double } @load_large_offset_double_no_opt(double* %p) #0 {
+  define { double, double } @load_large_offset_double_no_opt(ptr %p) #0 {
   entry:
-    %arrayidx = getelementptr inbounds double, double* %p, i32 100
-    %0 = load double, double* %arrayidx, align 8
-    %arrayidx1 = getelementptr inbounds double, double* %p, i32 101
-    %1 = load double, double* %arrayidx1, align 8
+    %arrayidx = getelementptr inbounds double, ptr %p, i32 100
+    %0 = load double, ptr %arrayidx, align 8
+    %arrayidx1 = getelementptr inbounds double, ptr %p, i32 101
+    %1 = load double, ptr %arrayidx1, align 8
     %2 = insertvalue { double, double } undef, double %0, 0
     %3 = insertvalue { double, double } %2, double %1, 1
     ret { double, double } %3

diff --git a/llvm/test/CodeGen/RISCV/misched-load-clustering.ll b/llvm/test/CodeGen/RISCV/misched-load-clustering.ll
index d046aaf98f5ed..db41b26271814 100644
--- a/llvm/test/CodeGen/RISCV/misched-load-clustering.ll
+++ b/llvm/test/CodeGen/RISCV/misched-load-clustering.ll
@@ -29,15 +29,15 @@ define i32 @load_clustering_1(ptr nocapture %p) {
 ; LDCLUSTER: SU(5): %6:gpr = LW %0:gpr, 16
 entry:
   %arrayidx0 = getelementptr inbounds i32, ptr %p, i32 3
-  %val0 = load i32, i32* %arrayidx0
+  %val0 = load i32, ptr %arrayidx0
   %arrayidx1 = getelementptr inbounds i32, ptr %p, i32 2
-  %val1 = load i32, i32* %arrayidx1
+  %val1 = load i32, ptr %arrayidx1
   %tmp0 = add i32 %val0, %val1
   %arrayidx2 = getelementptr inbounds i32, ptr %p, i32 1
-  %val2 = load i32, i32* %arrayidx2
+  %val2 = load i32, ptr %arrayidx2
   %tmp1 = add i32 %tmp0, %val2
   %arrayidx3 = getelementptr inbounds i32, ptr %p, i32 4
-  %val3 = load i32, i32* %arrayidx3
+  %val3 = load i32, ptr %arrayidx3
   %tmp2 = add i32 %tmp1, %val3
   ret i32 %tmp2
 }

diff --git a/llvm/test/CodeGen/RISCV/out-of-reach-emergency-slot.mir b/llvm/test/CodeGen/RISCV/out-of-reach-emergency-slot.mir
index 7c6253b897ffd..08716be713b0f 100644
--- a/llvm/test/CodeGen/RISCV/out-of-reach-emergency-slot.mir
+++ b/llvm/test/CodeGen/RISCV/out-of-reach-emergency-slot.mir
@@ -21,7 +21,7 @@
   target triple = "riscv64"
 
   ; Function Attrs: nounwind
-  define weak_odr dso_local void @foo(i8* %ay) nounwind {
+  define weak_odr dso_local void @foo(ptr %ay) nounwind {
   ; CHECK-LABEL: foo:
   ; CHECK:       # %bb.0: # %entry
   ; CHECK-NEXT:    addi sp, sp, -2032

diff --git a/llvm/test/CodeGen/RISCV/prefetch.ll b/llvm/test/CodeGen/RISCV/prefetch.ll
index 7ef33f8aa1303..e4c9b91b0dbbd 100644
--- a/llvm/test/CodeGen/RISCV/prefetch.ll
+++ b/llvm/test/CodeGen/RISCV/prefetch.ll
@@ -695,7 +695,7 @@ define void @test_prefetch_frameindex_0() nounwind {
 ; RV64ZICBOPZIHINTNTL-NEXT:    addi sp, sp, 512
 ; RV64ZICBOPZIHINTNTL-NEXT:    ret
   %data = alloca [128 x i32], align 4
-  %base = bitcast [128 x i32]* %data to i8*
+  %base = bitcast ptr %data to ptr
   %ptr = getelementptr [128 x i32], ptr %base, i32 0, i32 0
   call void @llvm.prefetch(ptr %ptr, i32 0, i32 0, i32 1)
   ret void
@@ -759,7 +759,7 @@ define void @test_prefetch_frameindex_1() nounwind {
 ; RV64ZICBOPZIHINTNTL-NEXT:    add sp, sp, a0
 ; RV64ZICBOPZIHINTNTL-NEXT:    ret
   %data = alloca [1024 x i32], align 4
-  %ptr = bitcast [1024 x i32]* %data to i8*
+  %ptr = bitcast ptr %data to ptr
   call void @llvm.prefetch(ptr %ptr, i32 0, i32 0, i32 1)
   ret void
 }
@@ -802,7 +802,7 @@ define void @test_prefetch_frameindex_2() nounwind {
 ; RV64ZICBOPZIHINTNTL-NEXT:    addi sp, sp, 512
 ; RV64ZICBOPZIHINTNTL-NEXT:    ret
   %data = alloca [128 x i32], align 4
-  %base = bitcast [128 x i32]* %data to i8*
+  %base = bitcast ptr %data to ptr
   %ptr = getelementptr [128 x i32], ptr %base, i32 0, i32 4
   call void @llvm.prefetch(ptr %ptr, i32 0, i32 0, i32 1)
   ret void
@@ -846,7 +846,7 @@ define void @test_prefetch_frameindex_3() nounwind {
 ; RV64ZICBOPZIHINTNTL-NEXT:    addi sp, sp, 512
 ; RV64ZICBOPZIHINTNTL-NEXT:    ret
   %data = alloca [128 x i32], align 4
-  %base = bitcast [128 x i32]* %data to i8*
+  %base = bitcast ptr %data to ptr
   %ptr = getelementptr [128 x i32], ptr %base, i32 0, i32 -4
   call void @llvm.prefetch(ptr %ptr, i32 0, i32 0, i32 1)
   ret void
@@ -887,7 +887,7 @@ define void @test_prefetch_frameindex_4() nounwind {
 ; RV64ZICBOPZIHINTNTL-NEXT:    addi sp, sp, 512
 ; RV64ZICBOPZIHINTNTL-NEXT:    ret
   %data = alloca [128 x i32], align 4
-  %base = bitcast [128 x i32]* %data to i8*
+  %base = bitcast ptr %data to ptr
   %ptr = getelementptr [128 x i32], ptr %base, i32 0, i32 8
   call void @llvm.prefetch(ptr %ptr, i32 0, i32 0, i32 1)
   ret void
@@ -928,7 +928,7 @@ define void @test_prefetch_frameindex_5() nounwind {
 ; RV64ZICBOPZIHINTNTL-NEXT:    addi sp, sp, 512
 ; RV64ZICBOPZIHINTNTL-NEXT:    ret
   %data = alloca [128 x i32], align 4
-  %base = bitcast [128 x i32]* %data to i8*
+  %base = bitcast ptr %data to ptr
   %ptr = getelementptr [128 x i32], ptr %base, i32 0, i32 -8
   call void @llvm.prefetch(ptr %ptr, i32 0, i32 0, i32 1)
   ret void
@@ -969,7 +969,7 @@ define void @test_prefetch_frameindex_6() nounwind {
 ; RV64ZICBOPZIHINTNTL-NEXT:    addi sp, sp, 512
 ; RV64ZICBOPZIHINTNTL-NEXT:    ret
   %data = alloca [128 x i32], align 4
-  %base = bitcast [128 x i32]* %data to i8*
+  %base = bitcast ptr %data to ptr
   %ptr = getelementptr [128 x i32], ptr %base, i32 0, i32 504
   call void @llvm.prefetch(ptr %ptr, i32 0, i32 0, i32 1)
   ret void
@@ -1010,7 +1010,7 @@ define void @test_prefetch_frameindex_7() nounwind {
 ; RV64ZICBOPZIHINTNTL-NEXT:    addi sp, sp, 512
 ; RV64ZICBOPZIHINTNTL-NEXT:    ret
   %data = alloca [128 x i32], align 4
-  %base = bitcast [128 x i32]* %data to i8*
+  %base = bitcast ptr %data to ptr
   %ptr = getelementptr [128 x i32], ptr %base, i32 0, i32 -512
   call void @llvm.prefetch(ptr %ptr, i32 0, i32 0, i32 1)
   ret void
@@ -1054,7 +1054,7 @@ define void @test_prefetch_frameindex_8() nounwind {
 ; RV64ZICBOPZIHINTNTL-NEXT:    addi sp, sp, 512
 ; RV64ZICBOPZIHINTNTL-NEXT:    ret
   %data = alloca [128 x i32], align 4
-  %base = bitcast [128 x i32]* %data to i8*
+  %base = bitcast ptr %data to ptr
   %ptr = getelementptr [128 x i32], ptr %base, i32 0, i32 505
   call void @llvm.prefetch(ptr %ptr, i32 0, i32 0, i32 1)
   ret void
@@ -1101,7 +1101,7 @@ define void @test_prefetch_frameindex_9() nounwind {
 ; RV64ZICBOPZIHINTNTL-NEXT:    addi sp, sp, 512
 ; RV64ZICBOPZIHINTNTL-NEXT:    ret
   %data = alloca [128 x i32], align 4
-  %base = bitcast [128 x i32]* %data to i8*
+  %base = bitcast ptr %data to ptr
   %ptr = getelementptr [128 x i32], ptr %base, i32 0, i32 -513
   call void @llvm.prefetch(ptr %ptr, i32 0, i32 0, i32 1)
   ret void
@@ -1134,7 +1134,7 @@ define void @test_prefetch_constant_address_0() nounwind {
 ; RV64ZICBOPZIHINTNTL-NEXT:    ntl.all
 ; RV64ZICBOPZIHINTNTL-NEXT:    prefetch.r 32(a0)
 ; RV64ZICBOPZIHINTNTL-NEXT:    ret
-  %ptr = inttoptr i64 4128 to i8*
+  %ptr = inttoptr i64 4128 to ptr
   call void @llvm.prefetch(ptr %ptr, i32 0, i32 0, i32 1)
   ret void
 }
@@ -1169,7 +1169,7 @@ define void @test_prefetch_constant_address_1() nounwind {
 ; RV64ZICBOPZIHINTNTL-NEXT:    ntl.all
 ; RV64ZICBOPZIHINTNTL-NEXT:    prefetch.r 0(a0)
 ; RV64ZICBOPZIHINTNTL-NEXT:    ret
-  %ptr = inttoptr i64 4127 to i8*
+  %ptr = inttoptr i64 4127 to ptr
   call void @llvm.prefetch(ptr %ptr, i32 0, i32 0, i32 1)
   ret void
 }
@@ -1201,7 +1201,7 @@ define void @test_prefetch_constant_address_2() nounwind {
 ; RV64ZICBOPZIHINTNTL-NEXT:    ntl.all
 ; RV64ZICBOPZIHINTNTL-NEXT:    prefetch.r 32(a0)
 ; RV64ZICBOPZIHINTNTL-NEXT:    ret
-  %ptr = inttoptr i64 18446744073709490208 to i8*
+  %ptr = inttoptr i64 18446744073709490208 to ptr
   call void @llvm.prefetch(ptr %ptr, i32 0, i32 0, i32 1)
   ret void
 }
@@ -1236,7 +1236,7 @@ define void @test_prefetch_constant_address_3() nounwind {
 ; RV64ZICBOPZIHINTNTL-NEXT:    ntl.all
 ; RV64ZICBOPZIHINTNTL-NEXT:    prefetch.r 0(a0)
 ; RV64ZICBOPZIHINTNTL-NEXT:    ret
-  %ptr = inttoptr i64 18446744073709490207 to i8*
+  %ptr = inttoptr i64 18446744073709490207 to ptr
   call void @llvm.prefetch(ptr %ptr, i32 0, i32 0, i32 1)
   ret void
 }

diff --git a/llvm/test/CodeGen/RISCV/push-pop-popret.ll b/llvm/test/CodeGen/RISCV/push-pop-popret.ll
index 945e7b46f8c9f..09a91498a15d2 100644
--- a/llvm/test/CodeGen/RISCV/push-pop-popret.ll
+++ b/llvm/test/CodeGen/RISCV/push-pop-popret.ll
@@ -12,9 +12,9 @@
 ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
 ; RUN: | FileCheck -check-prefixes=RV64I %s
 
-declare void @test(i8*)
-declare void @callee_void(i8*)
-declare i32 @callee(i8*)
+declare void @test(ptr)
+declare void @callee_void(ptr)
+declare i32 @callee(ptr)
 
 define i32 @foo() {
 ; RV32IZCMP-LABEL: foo:
@@ -87,8 +87,8 @@ define i32 @foo() {
 ; RV64I-NEXT:    addi sp, sp, 528
 ; RV64I-NEXT:    ret
   %1 = alloca [512 x i8]
-  %2 = getelementptr [512 x i8], [512 x i8]* %1, i32 0, i32 0
-  call void @test(i8* %2)
+  %2 = getelementptr [512 x i8], ptr %1, i32 0, i32 0
+  call void @test(ptr %2)
   ret i32 0
 }
 
@@ -208,7 +208,7 @@ define i32 @pushpopret0(i32 signext %size){
 ; RV64I-NEXT:    ret
 entry:
   %0 = alloca i8, i32 %size, align 16
-  call void @callee_void(i8* nonnull %0)
+  call void @callee_void(ptr nonnull %0)
   ret i32 0
 }
 
@@ -332,7 +332,7 @@ define i32 @pushpopret1(i32 signext %size) {
 ; RV64I-NEXT:    ret
 entry:
   %0 = alloca i8, i32 %size, align 16
-  call void @callee_void(i8* nonnull %0)
+  call void @callee_void(ptr nonnull %0)
   ret i32 1
 }
 
@@ -456,7 +456,7 @@ define i32 @pushpopretneg1(i32 signext %size) {
 ; RV64I-NEXT:    ret
 entry:
   %0 = alloca i8, i32 %size, align 16
-  call void @callee_void(i8* nonnull %0)
+  call void @callee_void(ptr nonnull %0)
   ret i32 -1
 }
 
@@ -580,7 +580,7 @@ define i32 @pushpopret2(i32 signext %size) {
 ; RV64I-NEXT:    ret
 entry:
   %0 = alloca i8, i32 %size, align 16
-  call void @callee_void(i8* nonnull %0)
+  call void @callee_void(ptr nonnull %0)
   ret i32 2
 }
 
@@ -696,7 +696,7 @@ define dso_local i32 @tailcall(i32 signext %size) local_unnamed_addr #0 {
 ; RV64I-NEXT:    tail callee
 entry:
   %0 = alloca i8, i32 %size, align 16
-  %1 = tail call i32 @callee(i8* nonnull %0)
+  %1 = tail call i32 @callee(ptr nonnull %0)
   ret i32 %1
 }
 
@@ -983,10 +983,10 @@ define i32 @nocompress(i32 signext %size) {
 ; RV64I-NEXT:    tail callee
 entry:
   %0 = alloca i8, i32 %size, align 16
-  %val = load [5 x i32], [5 x i32]* @var
-  call void @callee_void(i8* nonnull %0)
-  store volatile [5 x i32] %val, [5 x i32]* @var
-  %1 = tail call i32 @callee(i8* nonnull %0)
+  %val = load [5 x i32], ptr @var
+  call void @callee_void(ptr nonnull %0)
+  store volatile [5 x i32] %val, ptr @var
+  %1 = tail call i32 @callee(ptr nonnull %0)
   ret i32 %1
 }
 
@@ -1743,7 +1743,7 @@ define void @foo_with_irq() nounwind "interrupt"="user" {
 ; RV64I-NEXT:    ld t6, 0(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 128
 ; RV64I-NEXT:    mret
-  %call = call i32 bitcast (i32 (...)* @foo_test_irq to i32 ()*)()
+  %call = call i32 @foo_test_irq()
   ret void
 }
 
@@ -1789,7 +1789,7 @@ define void @foo_no_irq() nounwind{
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
-  %call = call i32 bitcast (i32 (...)* @foo_test_irq to i32 ()*)()
+  %call = call i32 @foo_test_irq()
   ret void
 }
 
@@ -2537,8 +2537,8 @@ define void @callee_with_irq() nounwind "interrupt"="user" {
 ; RV64I-NEXT:    ld t6, 48(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 272
 ; RV64I-NEXT:    mret
-  %val = load [32 x i32], [32 x i32]* @var_test_irq
-  store volatile [32 x i32] %val, [32 x i32]* @var_test_irq
+  %val = load [32 x i32], ptr @var_test_irq
+  store volatile [32 x i32] %val, ptr @var_test_irq
   ret void
 }
 
@@ -3094,8 +3094,8 @@ define void @callee_no_irq() nounwind{
 ; RV64I-NEXT:    ld s11, 56(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 160
 ; RV64I-NEXT:    ret
-  %val = load [32 x i32], [32 x i32]* @var_test_irq
-  store volatile [32 x i32] %val, [32 x i32]* @var_test_irq
+  %val = load [32 x i32], ptr @var_test_irq
+  store volatile [32 x i32] %val, ptr @var_test_irq
   ret void
 }
 

diff --git a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
index 804bc053728d8..3ce56318426ad 100644
--- a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
+++ b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
@@ -146,7 +146,7 @@ entry:
   %i4 = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> %i2, <vscale x 16 x half> %i1, i64 7, i64 36)
   %i5 = call <vscale x 16 x i16> @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> %i3, <vscale x 16 x i16> %i3, <vscale x 16 x i16> poison, <vscale x 16 x i1> poison, i64 32, i64 0)
   %i6 = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> %i4, <vscale x 16 x float> %i2, <vscale x 16 x float> poison, <vscale x 16 x i1> poison, i64 7, i64 36, i64 0)
-  call void @llvm.riscv.vse.nxv16f32.i64(<vscale x 16 x float> %i6, <vscale x 16 x float>* nonnull poison, i64 36)
+  call void @llvm.riscv.vse.nxv16f32.i64(<vscale x 16 x float> %i6, ptr nonnull poison, i64 36)
   ret void
 }
 
@@ -157,4 +157,4 @@ declare <vscale x 16 x i16> @llvm.riscv.vrgather.vv.mask.nxv16i16.i64(<vscale x
 declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16.i64(<vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x half>, i64, i64)
 declare <vscale x 16 x i16> @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i64, i64 immarg)
 declare <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x i1>, i64, i64, i64 immarg)
-declare void @llvm.riscv.vse.nxv16f32.i64(<vscale x 16 x float>, <vscale x 16 x float>* nocapture, i64) #3
+declare void @llvm.riscv.vse.nxv16f32.i64(<vscale x 16 x float>, ptr nocapture, i64) #3

diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/xtheadmemidx.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/xtheadmemidx.ll
index c836fc616a553..e557a14979e8b 100644
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/xtheadmemidx.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-legal-i32/xtheadmemidx.ll
@@ -2,553 +2,553 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+d -mattr=+xtheadmemidx -mattr=+m -verify-machineinstrs < %s \
 ; RUN:   -riscv-experimental-rv64-legal-i32 | FileCheck %s -check-prefix=RV64XTHEADMEMIDX
 
-define i8* @lbia(i8* %base, i8* %addr.2, i8 %a) {
+define ptr @lbia(ptr %base, ptr %addr.2, i8 %a) {
 ; RV64XTHEADMEMIDX-LABEL: lbia:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    th.lbia a3, (a0), -1, 0
 ; RV64XTHEADMEMIDX-NEXT:    add a2, a3, a2
 ; RV64XTHEADMEMIDX-NEXT:    sb a2, 0(a1)
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %addr = getelementptr i8, i8* %base, i8 0
-  %ld = load i8, i8* %addr
-  %addr.1 = getelementptr i8, i8* %base, i8 -1
+  %addr = getelementptr i8, ptr %base, i8 0
+  %ld = load i8, ptr %addr
+  %addr.1 = getelementptr i8, ptr %base, i8 -1
   %res = add i8 %ld, %a
-  store i8 %res, i8* %addr.2
-  ret i8* %addr.1
+  store i8 %res, ptr %addr.2
+  ret ptr %addr.1
 }
 
-define i8* @lbib(i8* %base, i8 %a) {
+define ptr @lbib(ptr %base, i8 %a) {
 ; RV64XTHEADMEMIDX-LABEL: lbib:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    th.lbib a2, (a0), 1, 0
 ; RV64XTHEADMEMIDX-NEXT:    add a1, a2, a1
 ; RV64XTHEADMEMIDX-NEXT:    sb a1, 1(a0)
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %addr = getelementptr i8, i8* %base, i8 1
-  %ld = load i8, i8* %addr
-  %addr.1 = getelementptr i8, i8* %base, i8 2
+  %addr = getelementptr i8, ptr %base, i8 1
+  %ld = load i8, ptr %addr
+  %addr.1 = getelementptr i8, ptr %base, i8 2
   %res = add i8 %ld, %a
-  store i8 %res, i8* %addr.1
-  ret i8* %addr
+  store i8 %res, ptr %addr.1
+  ret ptr %addr
 }
 
-define i8* @lbuia(i8* %base, i64* %addr.2, i64 %a) {
+define ptr @lbuia(ptr %base, ptr %addr.2, i64 %a) {
 ; RV64XTHEADMEMIDX-LABEL: lbuia:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    th.lbuia a3, (a0), -1, 0
 ; RV64XTHEADMEMIDX-NEXT:    add a2, a3, a2
 ; RV64XTHEADMEMIDX-NEXT:    sd a2, 0(a1)
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %addr = getelementptr i8, i8* %base, i8 0
-  %ld = load i8, i8* %addr
+  %addr = getelementptr i8, ptr %base, i8 0
+  %ld = load i8, ptr %addr
   %zext = zext i8 %ld to i64
-  %addr.1 = getelementptr i8, i8* %base, i8 -1
+  %addr.1 = getelementptr i8, ptr %base, i8 -1
   %res = add i64 %zext, %a
-  store i64 %res, i64* %addr.2
-  ret i8* %addr.1
+  store i64 %res, ptr %addr.2
+  ret ptr %addr.1
 }
 
-define i8* @lbuib(i8* %base, i64 %a, i64* %addr.1) {
+define ptr @lbuib(ptr %base, i64 %a, ptr %addr.1) {
 ; RV64XTHEADMEMIDX-LABEL: lbuib:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    th.lbuib a3, (a0), 1, 0
 ; RV64XTHEADMEMIDX-NEXT:    add a1, a3, a1
 ; RV64XTHEADMEMIDX-NEXT:    sd a1, 0(a2)
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %addr = getelementptr i8, i8* %base, i8 1
-  %ld = load i8, i8* %addr
+  %addr = getelementptr i8, ptr %base, i8 1
+  %ld = load i8, ptr %addr
   %zext = zext i8 %ld to i64
   %res = add i64 %zext, %a
-  store i64 %res, i64* %addr.1
-  ret i8* %addr
+  store i64 %res, ptr %addr.1
+  ret ptr %addr
 }
 
-define i16* @lhia(i16* %base, i16* %addr.2, i16 %a) {
+define ptr @lhia(ptr %base, ptr %addr.2, i16 %a) {
 ; RV64XTHEADMEMIDX-LABEL: lhia:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    th.lhia a3, (a0), -16, 1
 ; RV64XTHEADMEMIDX-NEXT:    add a2, a3, a2
 ; RV64XTHEADMEMIDX-NEXT:    sh a2, 0(a1)
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %addr = getelementptr i16, i16* %base, i16 0
-  %ld = load i16, i16* %addr
-  %addr.1 = getelementptr i16, i16* %base, i16 -16
+  %addr = getelementptr i16, ptr %base, i16 0
+  %ld = load i16, ptr %addr
+  %addr.1 = getelementptr i16, ptr %base, i16 -16
   %res = add i16 %ld, %a
-  store i16 %res, i16* %addr.2
-  ret i16* %addr.1
+  store i16 %res, ptr %addr.2
+  ret ptr %addr.1
 }
 
-define i16* @lhib(i16* %base, i16 %a) {
+define ptr @lhib(ptr %base, i16 %a) {
 ; RV64XTHEADMEMIDX-LABEL: lhib:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    th.lhib a2, (a0), 2, 0
 ; RV64XTHEADMEMIDX-NEXT:    add a1, a2, a1
 ; RV64XTHEADMEMIDX-NEXT:    sh a1, 2(a0)
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %addr = getelementptr i16, i16* %base, i16 1
-  %ld = load i16, i16* %addr
-  %addr.1 = getelementptr i16, i16* %base, i16 2
+  %addr = getelementptr i16, ptr %base, i16 1
+  %ld = load i16, ptr %addr
+  %addr.1 = getelementptr i16, ptr %base, i16 2
   %res = add i16 %ld, %a
-  store i16 %res, i16* %addr.1
-  ret i16* %addr
+  store i16 %res, ptr %addr.1
+  ret ptr %addr
 }
 
-define i16* @lhuia(i16* %base, i64* %addr.2, i64 %a) {
+define ptr @lhuia(ptr %base, ptr %addr.2, i64 %a) {
 ; RV64XTHEADMEMIDX-LABEL: lhuia:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    th.lhuia a3, (a0), -16, 1
 ; RV64XTHEADMEMIDX-NEXT:    add a2, a3, a2
 ; RV64XTHEADMEMIDX-NEXT:    sd a2, 0(a1)
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %addr = getelementptr i16, i16* %base, i16 0
-  %ld = load i16, i16* %addr
+  %addr = getelementptr i16, ptr %base, i16 0
+  %ld = load i16, ptr %addr
   %zext = zext i16 %ld to i64
-  %addr.1 = getelementptr i16, i16* %base, i16 -16
+  %addr.1 = getelementptr i16, ptr %base, i16 -16
   %res = add i64 %zext, %a
-  store i64 %res, i64* %addr.2
-  ret i16* %addr.1
+  store i64 %res, ptr %addr.2
+  ret ptr %addr.1
 }
 
-define i16* @lhuib(i16* %base, i64 %a, i64* %addr.1) {
+define ptr @lhuib(ptr %base, i64 %a, ptr %addr.1) {
 ; RV64XTHEADMEMIDX-LABEL: lhuib:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    th.lhuib a3, (a0), 2, 0
 ; RV64XTHEADMEMIDX-NEXT:    add a1, a3, a1
 ; RV64XTHEADMEMIDX-NEXT:    sd a1, 0(a2)
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %addr = getelementptr i16, i16* %base, i16 1
-  %ld = load i16, i16* %addr
+  %addr = getelementptr i16, ptr %base, i16 1
+  %ld = load i16, ptr %addr
   %zext = zext i16 %ld to i64
   %res = add i64 %zext, %a
-  store i64 %res, i64* %addr.1
-  ret i16* %addr
+  store i64 %res, ptr %addr.1
+  ret ptr %addr
 }
 
-define i32* @lwia(i32* %base, i32* %addr.2, i32 %a) {
+define ptr @lwia(ptr %base, ptr %addr.2, i32 %a) {
 ; RV64XTHEADMEMIDX-LABEL: lwia:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    th.lwia a3, (a0), -16, 2
 ; RV64XTHEADMEMIDX-NEXT:    add a2, a3, a2
 ; RV64XTHEADMEMIDX-NEXT:    sw a2, 0(a1)
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %addr = getelementptr i32, i32* %base, i32 0
-  %ld = load i32, i32* %addr
-  %addr.1 = getelementptr i32, i32* %base, i32 -16
+  %addr = getelementptr i32, ptr %base, i32 0
+  %ld = load i32, ptr %addr
+  %addr.1 = getelementptr i32, ptr %base, i32 -16
   %res = add i32 %ld, %a
-  store i32 %res, i32* %addr.2
-  ret i32* %addr.1
+  store i32 %res, ptr %addr.2
+  ret ptr %addr.1
 }
 
-define i32* @lwib(i32* %base, i32 %a) {
+define ptr @lwib(ptr %base, i32 %a) {
 ; RV64XTHEADMEMIDX-LABEL: lwib:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    th.lwib a2, (a0), 4, 0
 ; RV64XTHEADMEMIDX-NEXT:    add a1, a2, a1
 ; RV64XTHEADMEMIDX-NEXT:    sw a1, 4(a0)
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %addr = getelementptr i32, i32* %base, i32 1
-  %ld = load i32, i32* %addr
-  %addr.1 = getelementptr i32, i32* %base, i32 2
+  %addr = getelementptr i32, ptr %base, i32 1
+  %ld = load i32, ptr %addr
+  %addr.1 = getelementptr i32, ptr %base, i32 2
   %res = add i32 %ld, %a
-  store i32 %res, i32* %addr.1
-  ret i32* %addr
+  store i32 %res, ptr %addr.1
+  ret ptr %addr
 }
 
-define i32* @lwuia(i32* %base, i64* %addr.2, i64 %a) {
+define ptr @lwuia(ptr %base, ptr %addr.2, i64 %a) {
 ; RV64XTHEADMEMIDX-LABEL: lwuia:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    th.lwuia a3, (a0), -16, 2
 ; RV64XTHEADMEMIDX-NEXT:    add a2, a3, a2
 ; RV64XTHEADMEMIDX-NEXT:    sd a2, 0(a1)
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %addr = getelementptr i32, i32* %base, i32 0
-  %ld = load i32, i32* %addr
+  %addr = getelementptr i32, ptr %base, i32 0
+  %ld = load i32, ptr %addr
   %zext = zext i32 %ld to i64
-  %addr.1 = getelementptr i32, i32* %base, i32 -16
+  %addr.1 = getelementptr i32, ptr %base, i32 -16
   %res = add i64 %zext, %a
-  store i64 %res, i64* %addr.2
-  ret i32* %addr.1
+  store i64 %res, ptr %addr.2
+  ret ptr %addr.1
 }
 
-define i32* @lwuib(i32* %base, i64 %a, i64* %addr.1) {
+define ptr @lwuib(ptr %base, i64 %a, ptr %addr.1) {
 ; RV64XTHEADMEMIDX-LABEL: lwuib:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    th.lwuib a3, (a0), 4, 0
 ; RV64XTHEADMEMIDX-NEXT:    add a1, a3, a1
 ; RV64XTHEADMEMIDX-NEXT:    sd a1, 0(a2)
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %addr = getelementptr i32, i32* %base, i32 1
-  %ld = load i32, i32* %addr
+  %addr = getelementptr i32, ptr %base, i32 1
+  %ld = load i32, ptr %addr
   %zext = zext i32 %ld to i64
   %res = add i64 %zext, %a
-  store i64 %res, i64* %addr.1
-  ret i32* %addr
+  store i64 %res, ptr %addr.1
+  ret ptr %addr
 }
 
-define i64* @ldia(i64* %base, i64* %addr.2, i64 %a) {
+define ptr @ldia(ptr %base, ptr %addr.2, i64 %a) {
 ; RV64XTHEADMEMIDX-LABEL: ldia:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    th.ldia a3, (a0), -16, 3
 ; RV64XTHEADMEMIDX-NEXT:    add a2, a3, a2
 ; RV64XTHEADMEMIDX-NEXT:    sd a2, 0(a1)
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %addr = getelementptr i64, i64* %base, i64 0
-  %ld = load i64, i64* %addr
-  %addr.1 = getelementptr i64, i64* %base, i64 -16
+  %addr = getelementptr i64, ptr %base, i64 0
+  %ld = load i64, ptr %addr
+  %addr.1 = getelementptr i64, ptr %base, i64 -16
   %res = add i64 %ld, %a
-  store i64 %res, i64* %addr.2
-  ret i64* %addr.1
+  store i64 %res, ptr %addr.2
+  ret ptr %addr.1
 }
 
-define i64* @ldib(i64* %base, i64 %a) {
+define ptr @ldib(ptr %base, i64 %a) {
 ; RV64XTHEADMEMIDX-LABEL: ldib:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    th.ldib a2, (a0), 8, 0
 ; RV64XTHEADMEMIDX-NEXT:    add a1, a2, a1
 ; RV64XTHEADMEMIDX-NEXT:    sd a1, 8(a0)
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %addr = getelementptr i64, i64* %base, i64 1
-  %ld = load i64, i64* %addr
-  %addr.1 = getelementptr i64, i64* %base, i64 2
+  %addr = getelementptr i64, ptr %base, i64 1
+  %ld = load i64, ptr %addr
+  %addr.1 = getelementptr i64, ptr %base, i64 2
   %res = add i64 %ld, %a
-  store i64 %res, i64* %addr.1
-  ret i64* %addr
+  store i64 %res, ptr %addr.1
+  ret ptr %addr
 }
 
-define i8* @sbia(i8* %base, i8 %a, i8 %b) {
+define ptr @sbia(ptr %base, i8 %a, i8 %b) {
 ; RV64XTHEADMEMIDX-LABEL: sbia:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    addw a1, a1, a2
 ; RV64XTHEADMEMIDX-NEXT:    th.sbia a1, (a0), 1, 0
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %addr.1 = getelementptr i8, i8* %base, i8 1
+  %addr.1 = getelementptr i8, ptr %base, i8 1
   %res = add i8 %a, %b
-  store i8 %res, i8* %base
-  ret i8* %addr.1
+  store i8 %res, ptr %base
+  ret ptr %addr.1
 }
 
-define i8* @sbib(i8* %base, i8 %a, i8 %b) {
+define ptr @sbib(ptr %base, i8 %a, i8 %b) {
 ; RV64XTHEADMEMIDX-LABEL: sbib:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    addw a1, a1, a2
 ; RV64XTHEADMEMIDX-NEXT:    th.sbib a1, (a0), 1, 0
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %addr.1 = getelementptr i8, i8* %base, i8 1
+  %addr.1 = getelementptr i8, ptr %base, i8 1
   %res = add i8 %a, %b
-  store i8 %res, i8* %addr.1
-  ret i8* %addr.1
+  store i8 %res, ptr %addr.1
+  ret ptr %addr.1
 }
 
-define i16* @shia(i16* %base, i16 %a, i16 %b) {
+define ptr @shia(ptr %base, i16 %a, i16 %b) {
 ; RV64XTHEADMEMIDX-LABEL: shia:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    addw a1, a1, a2
 ; RV64XTHEADMEMIDX-NEXT:    th.shia a1, (a0), -9, 1
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %addr.1 = getelementptr i16, i16* %base, i16 -9
+  %addr.1 = getelementptr i16, ptr %base, i16 -9
   %res = add i16 %a, %b
-  store i16 %res, i16* %base
-  ret i16* %addr.1
+  store i16 %res, ptr %base
+  ret ptr %addr.1
 }
 
-define i16* @shib(i16* %base, i16 %a, i16 %b) {
+define ptr @shib(ptr %base, i16 %a, i16 %b) {
 ; RV64XTHEADMEMIDX-LABEL: shib:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    addw a1, a1, a2
 ; RV64XTHEADMEMIDX-NEXT:    th.shib a1, (a0), 2, 0
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %addr.1 = getelementptr i16, i16* %base, i16 1
+  %addr.1 = getelementptr i16, ptr %base, i16 1
   %res = add i16 %a, %b
-  store i16 %res, i16* %addr.1
-  ret i16* %addr.1
+  store i16 %res, ptr %addr.1
+  ret ptr %addr.1
 }
 
-define i32* @swia(i32* %base, i32 %a, i32 %b) {
+define ptr @swia(ptr %base, i32 %a, i32 %b) {
 ; RV64XTHEADMEMIDX-LABEL: swia:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    addw a1, a1, a2
 ; RV64XTHEADMEMIDX-NEXT:    th.swia a1, (a0), 8, 2
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %addr.1 = getelementptr i32, i32* %base, i32 8
+  %addr.1 = getelementptr i32, ptr %base, i32 8
   %res = add i32 %a, %b
-  store i32 %res, i32* %base
-  ret i32* %addr.1
+  store i32 %res, ptr %base
+  ret ptr %addr.1
 }
 
-define i32* @swib(i32* %base, i32 %a, i32 %b) {
+define ptr @swib(ptr %base, i32 %a, i32 %b) {
 ; RV64XTHEADMEMIDX-LABEL: swib:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    addw a1, a1, a2
 ; RV64XTHEADMEMIDX-NEXT:    th.swib a1, (a0), -13, 3
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %addr.1 = getelementptr i32, i32* %base, i32 -26
+  %addr.1 = getelementptr i32, ptr %base, i32 -26
   %res = add i32 %a, %b
-  store i32 %res, i32* %addr.1
-  ret i32* %addr.1
+  store i32 %res, ptr %addr.1
+  ret ptr %addr.1
 }
 
-define i64* @sdia(i64* %base, i64 %a, i64 %b) {
+define ptr @sdia(ptr %base, i64 %a, i64 %b) {
 ; RV64XTHEADMEMIDX-LABEL: sdia:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    add a1, a1, a2
 ; RV64XTHEADMEMIDX-NEXT:    th.sdia a1, (a0), 8, 3
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %addr.1 = getelementptr i64, i64* %base, i64 8
+  %addr.1 = getelementptr i64, ptr %base, i64 8
   %res = add i64 %a, %b
-  store i64 %res, i64* %base
-  ret i64* %addr.1
+  store i64 %res, ptr %base
+  ret ptr %addr.1
 }
 
-define i64* @sdib(i64* %base, i64 %a, i64 %b) {
+define ptr @sdib(ptr %base, i64 %a, i64 %b) {
 ; RV64XTHEADMEMIDX-LABEL: sdib:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    add a1, a1, a2
 ; RV64XTHEADMEMIDX-NEXT:    th.sdib a1, (a0), 8, 0
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %addr.1 = getelementptr i64, i64* %base, i64 1
+  %addr.1 = getelementptr i64, ptr %base, i64 1
   %res = add i64 %a, %b
-  store i64 %res, i64* %addr.1
-  ret i64* %addr.1
+  store i64 %res, ptr %addr.1
+  ret ptr %addr.1
 }
 
-define i8 @lrb_anyext(i8* %a, i64 %b) {
+define i8 @lrb_anyext(ptr %a, i64 %b) {
 ; RV64XTHEADMEMIDX-LABEL: lrb_anyext:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    th.lrb a0, a0, a1, 0
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %1 = getelementptr i8, i8* %a, i64 %b
-  %2 = load i8, i8* %1, align 1
+  %1 = getelementptr i8, ptr %a, i64 %b
+  %2 = load i8, ptr %1, align 1
   ret i8 %2
 }
 
-define i64 @lrb(i8* %a, i64 %b) {
+define i64 @lrb(ptr %a, i64 %b) {
 ; RV64XTHEADMEMIDX-LABEL: lrb:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    th.lrb a0, a0, a1, 0
 ; RV64XTHEADMEMIDX-NEXT:    add a0, a0, a0
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %1 = getelementptr i8, i8* %a, i64 %b
-  %2 = load i8, i8* %1, align 1
+  %1 = getelementptr i8, ptr %a, i64 %b
+  %2 = load i8, ptr %1, align 1
   %3 = sext i8 %2 to i64
   %4 = add i64 %3, %3
   ret i64 %4
 }
 
-define i8 @lurb_anyext(i8* %a, i32 %b) {
+define i8 @lurb_anyext(ptr %a, i32 %b) {
 ; RV64XTHEADMEMIDX-LABEL: lurb_anyext:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    th.lurb a0, a0, a1, 0
 ; RV64XTHEADMEMIDX-NEXT:    ret
   %1 = zext i32 %b to i64
-  %2 = getelementptr i8, i8* %a, i64 %1
-  %3 = load i8, i8* %2, align 1
+  %2 = getelementptr i8, ptr %a, i64 %1
+  %3 = load i8, ptr %2, align 1
   ret i8 %3
 }
 
-define i64 @lurb(i8* %a, i32 %b) {
+define i64 @lurb(ptr %a, i32 %b) {
 ; RV64XTHEADMEMIDX-LABEL: lurb:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    th.lurb a0, a0, a1, 0
 ; RV64XTHEADMEMIDX-NEXT:    add a0, a0, a0
 ; RV64XTHEADMEMIDX-NEXT:    ret
   %1 = zext i32 %b to i64
-  %2 = getelementptr i8, i8* %a, i64 %1
-  %3 = load i8, i8* %2, align 1
+  %2 = getelementptr i8, ptr %a, i64 %1
+  %3 = load i8, ptr %2, align 1
   %4 = sext i8 %3 to i64
   %5 = add i64 %4, %4
   ret i64 %5
 }
 
-define i64 @lrbu(i8* %a, i64 %b) {
+define i64 @lrbu(ptr %a, i64 %b) {
 ; RV64XTHEADMEMIDX-LABEL: lrbu:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    th.lrbu a0, a0, a1, 0
 ; RV64XTHEADMEMIDX-NEXT:    add a0, a0, a0
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %1 = getelementptr i8, i8* %a, i64 %b
-  %2 = load i8, i8* %1, align 1
+  %1 = getelementptr i8, ptr %a, i64 %b
+  %2 = load i8, ptr %1, align 1
   %3 = zext i8 %2 to i64
   %4 = add i64 %3, %3
   ret i64 %4
 }
 
-define i64 @lurbu(i8* %a, i32 %b) {
+define i64 @lurbu(ptr %a, i32 %b) {
 ; RV64XTHEADMEMIDX-LABEL: lurbu:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    th.lurbu a0, a0, a1, 0
 ; RV64XTHEADMEMIDX-NEXT:    add a0, a0, a0
 ; RV64XTHEADMEMIDX-NEXT:    ret
   %1 = zext i32 %b to i64
-  %2 = getelementptr i8, i8* %a, i64 %1
-  %3 = load i8, i8* %2, align 1
+  %2 = getelementptr i8, ptr %a, i64 %1
+  %3 = load i8, ptr %2, align 1
   %4 = zext i8 %3 to i64
   %5 = add i64 %4, %4
   ret i64 %5
 }
 
-define i16 @lrh_anyext(i16* %a, i64 %b) {
+define i16 @lrh_anyext(ptr %a, i64 %b) {
 ; RV64XTHEADMEMIDX-LABEL: lrh_anyext:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    th.lrh a0, a0, a1, 1
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %1 = getelementptr i16, i16* %a, i64 %b
-  %2 = load i16, i16* %1, align 2
+  %1 = getelementptr i16, ptr %a, i64 %b
+  %2 = load i16, ptr %1, align 2
   ret i16 %2
 }
 
-define i64 @lrh(i16* %a, i64 %b) {
+define i64 @lrh(ptr %a, i64 %b) {
 ; RV64XTHEADMEMIDX-LABEL: lrh:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    th.lrh a0, a0, a1, 1
 ; RV64XTHEADMEMIDX-NEXT:    add a0, a0, a0
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %1 = getelementptr i16, i16* %a, i64 %b
-  %2 = load i16, i16* %1, align 2
+  %1 = getelementptr i16, ptr %a, i64 %b
+  %2 = load i16, ptr %1, align 2
   %3 = sext i16 %2 to i64
   %4 = add i64 %3, %3
   ret i64 %4
 }
 
-define i16 @lurh_anyext(i16* %a, i32 %b) {
+define i16 @lurh_anyext(ptr %a, i32 %b) {
 ; RV64XTHEADMEMIDX-LABEL: lurh_anyext:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    th.lurh a0, a0, a1, 1
 ; RV64XTHEADMEMIDX-NEXT:    ret
   %1 = zext i32 %b to i64
-  %2 = getelementptr i16, i16* %a, i64 %1
-  %3 = load i16, i16* %2, align 2
+  %2 = getelementptr i16, ptr %a, i64 %1
+  %3 = load i16, ptr %2, align 2
   ret i16 %3
 }
 
-define i64 @lurh(i16* %a, i32 %b) {
+define i64 @lurh(ptr %a, i32 %b) {
 ; RV64XTHEADMEMIDX-LABEL: lurh:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    th.lurh a0, a0, a1, 1
 ; RV64XTHEADMEMIDX-NEXT:    add a0, a0, a0
 ; RV64XTHEADMEMIDX-NEXT:    ret
   %1 = zext i32 %b to i64
-  %2 = getelementptr i16, i16* %a, i64 %1
-  %3 = load i16, i16* %2, align 2
+  %2 = getelementptr i16, ptr %a, i64 %1
+  %3 = load i16, ptr %2, align 2
   %4 = sext i16 %3 to i64
   %5 = add i64 %4, %4
   ret i64 %5
 }
 
-define i64 @lrhu(i16* %a, i64 %b) {
+define i64 @lrhu(ptr %a, i64 %b) {
 ; RV64XTHEADMEMIDX-LABEL: lrhu:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    th.lrhu a0, a0, a1, 1
 ; RV64XTHEADMEMIDX-NEXT:    add a0, a0, a0
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %1 = getelementptr i16, i16* %a, i64 %b
-  %2 = load i16, i16* %1, align 2
+  %1 = getelementptr i16, ptr %a, i64 %b
+  %2 = load i16, ptr %1, align 2
   %3 = zext i16 %2 to i64
   %4 = add i64 %3, %3
   ret i64 %4
 }
 
-define i64 @lurhu(i16* %a, i32 %b) {
+define i64 @lurhu(ptr %a, i32 %b) {
 ; RV64XTHEADMEMIDX-LABEL: lurhu:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    th.lurhu a0, a0, a1, 1
 ; RV64XTHEADMEMIDX-NEXT:    add a0, a0, a0
 ; RV64XTHEADMEMIDX-NEXT:    ret
   %1 = zext i32 %b to i64
-  %2 = getelementptr i16, i16* %a, i64 %1
-  %3 = load i16, i16* %2, align 2
+  %2 = getelementptr i16, ptr %a, i64 %1
+  %3 = load i16, ptr %2, align 2
   %4 = zext i16 %3 to i64
   %5 = add i64 %4, %4
   ret i64 %5
 }
 
-define i32 @lrw_anyext(i32* %a, i64 %b) {
+define i32 @lrw_anyext(ptr %a, i64 %b) {
 ; RV64XTHEADMEMIDX-LABEL: lrw_anyext:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    th.lrw a0, a0, a1, 2
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %1 = getelementptr i32, i32* %a, i64 %b
-  %2 = load i32, i32* %1, align 4
+  %1 = getelementptr i32, ptr %a, i64 %b
+  %2 = load i32, ptr %1, align 4
   ret i32 %2
 }
 
-define i64 @lrw(i32* %a, i64 %b) {
+define i64 @lrw(ptr %a, i64 %b) {
 ; RV64XTHEADMEMIDX-LABEL: lrw:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    th.lrw a0, a0, a1, 2
 ; RV64XTHEADMEMIDX-NEXT:    add a0, a0, a0
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %1 = getelementptr i32, i32* %a, i64 %b
-  %2 = load i32, i32* %1, align 4
+  %1 = getelementptr i32, ptr %a, i64 %b
+  %2 = load i32, ptr %1, align 4
   %3 = sext i32 %2 to i64
   %4 = add i64 %3, %3
   ret i64 %4
 }
 
-define i32 @lurw_anyext(i32* %a, i32 %b) {
+define i32 @lurw_anyext(ptr %a, i32 %b) {
 ; RV64XTHEADMEMIDX-LABEL: lurw_anyext:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    th.lurw a0, a0, a1, 2
 ; RV64XTHEADMEMIDX-NEXT:    ret
   %1 = zext i32 %b to i64
-  %2 = getelementptr i32, i32* %a, i64 %1
-  %3 = load i32, i32* %2, align 4
+  %2 = getelementptr i32, ptr %a, i64 %1
+  %3 = load i32, ptr %2, align 4
   ret i32 %3
 }
 
-define i64 @lurw(i32* %a, i32 %b) {
+define i64 @lurw(ptr %a, i32 %b) {
 ; RV64XTHEADMEMIDX-LABEL: lurw:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    th.lurw a0, a0, a1, 2
 ; RV64XTHEADMEMIDX-NEXT:    add a0, a0, a0
 ; RV64XTHEADMEMIDX-NEXT:    ret
   %1 = zext i32 %b to i64
-  %2 = getelementptr i32, i32* %a, i64 %1
-  %3 = load i32, i32* %2, align 4
+  %2 = getelementptr i32, ptr %a, i64 %1
+  %3 = load i32, ptr %2, align 4
   %4 = sext i32 %3 to i64
   %5 = add i64 %4, %4
   ret i64 %5
 }
 
-define i64 @lrwu(i32* %a, i64 %b) {
+define i64 @lrwu(ptr %a, i64 %b) {
 ; RV64XTHEADMEMIDX-LABEL: lrwu:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    th.lrwu a0, a0, a1, 2
 ; RV64XTHEADMEMIDX-NEXT:    add a0, a0, a0
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %1 = getelementptr i32, i32* %a, i64 %b
-  %2 = load i32, i32* %1, align 4
+  %1 = getelementptr i32, ptr %a, i64 %b
+  %2 = load i32, ptr %1, align 4
   %3 = zext i32 %2 to i64
   %4 = add i64 %3, %3
   ret i64 %4
 }
 
-define i64 @lurwu(i32* %a, i32 %b) {
+define i64 @lurwu(ptr %a, i32 %b) {
 ; RV64XTHEADMEMIDX-LABEL: lurwu:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    th.lurwu a0, a0, a1, 2
 ; RV64XTHEADMEMIDX-NEXT:    add a0, a0, a0
 ; RV64XTHEADMEMIDX-NEXT:    ret
   %1 = zext i32 %b to i64
-  %2 = getelementptr i32, i32* %a, i64 %1
-  %3 = load i32, i32* %2, align 4
+  %2 = getelementptr i32, ptr %a, i64 %1
+  %3 = load i32, ptr %2, align 4
   %4 = zext i32 %3 to i64
   %5 = add i64 %4, %4
   ret i64 %5
 }
 
-define i64 @lrd(i64* %a, i64 %b) {
+define i64 @lrd(ptr %a, i64 %b) {
 ; RV64XTHEADMEMIDX-LABEL: lrd:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    th.lrd a0, a0, a1, 3
 ; RV64XTHEADMEMIDX-NEXT:    add a0, a0, a0
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %1 = getelementptr i64, i64* %a, i64 %b
-  %2 = load i64, i64* %1, align 8
+  %1 = getelementptr i64, ptr %a, i64 %b
+  %2 = load i64, ptr %1, align 8
   %3 = add i64 %2, %2
   ret i64 %3
 }
 
-define i64 @lrd_2(i64* %a, i64 %b) {
+define i64 @lrd_2(ptr %a, i64 %b) {
 ; RV64XTHEADMEMIDX-LABEL: lrd_2:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    addi a0, a0, 96
@@ -556,38 +556,38 @@ define i64 @lrd_2(i64* %a, i64 %b) {
 ; RV64XTHEADMEMIDX-NEXT:    add a0, a0, a0
 ; RV64XTHEADMEMIDX-NEXT:    ret
   %1 = add i64 %b, 12
-  %2 = getelementptr i64, i64* %a, i64 %1
-  %3 = load i64, i64* %2, align 8
+  %2 = getelementptr i64, ptr %a, i64 %1
+  %3 = load i64, ptr %2, align 8
   %4 = add i64 %3, %3
   ret i64 %4
 }
 
-define i64 @lurd(i64* %a, i32 %b) {
+define i64 @lurd(ptr %a, i32 %b) {
 ; RV64XTHEADMEMIDX-LABEL: lurd:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    th.lurd a0, a0, a1, 3
 ; RV64XTHEADMEMIDX-NEXT:    add a0, a0, a0
 ; RV64XTHEADMEMIDX-NEXT:    ret
   %1 = zext i32 %b to i64
-  %2 = getelementptr i64, i64* %a, i64 %1
-  %3 = load i64, i64* %2, align 8
+  %2 = getelementptr i64, ptr %a, i64 %1
+  %3 = load i64, ptr %2, align 8
   %4 = add i64 %3, %3
   ret i64 %4
 }
 
-define void @srb(i8* %a, i64 %b, i8 %c) {
+define void @srb(ptr %a, i64 %b, i8 %c) {
 ; RV64XTHEADMEMIDX-LABEL: srb:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    addw a2, a2, a2
 ; RV64XTHEADMEMIDX-NEXT:    th.srb a2, a0, a1, 0
 ; RV64XTHEADMEMIDX-NEXT:    ret
   %1 = add i8 %c, %c
-  %2 = getelementptr i8, i8* %a, i64 %b
-  store i8 %1, i8* %2, align 1
+  %2 = getelementptr i8, ptr %a, i64 %b
+  store i8 %1, ptr %2, align 1
   ret void
 }
 
-define void @surb(i8* %a, i32 %b, i8 %c) {
+define void @surb(ptr %a, i32 %b, i8 %c) {
 ; RV64XTHEADMEMIDX-LABEL: surb:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    addw a2, a2, a2
@@ -595,24 +595,24 @@ define void @surb(i8* %a, i32 %b, i8 %c) {
 ; RV64XTHEADMEMIDX-NEXT:    ret
   %1 = zext i32 %b to i64
   %2 = add i8 %c, %c
-  %3 = getelementptr i8, i8* %a, i64 %1
-  store i8 %2, i8* %3, align 1
+  %3 = getelementptr i8, ptr %a, i64 %1
+  store i8 %2, ptr %3, align 1
   ret void
 }
 
-define void @srh(i16* %a, i64 %b, i16 %c) {
+define void @srh(ptr %a, i64 %b, i16 %c) {
 ; RV64XTHEADMEMIDX-LABEL: srh:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    addw a2, a2, a2
 ; RV64XTHEADMEMIDX-NEXT:    th.srh a2, a0, a1, 1
 ; RV64XTHEADMEMIDX-NEXT:    ret
   %1 = add i16 %c, %c
-  %2 = getelementptr i16, i16* %a, i64 %b
-  store i16 %1, i16* %2, align 2
+  %2 = getelementptr i16, ptr %a, i64 %b
+  store i16 %1, ptr %2, align 2
   ret void
 }
 
-define void @surh(i16* %a, i32 %b, i16 %c) {
+define void @surh(ptr %a, i32 %b, i16 %c) {
 ; RV64XTHEADMEMIDX-LABEL: surh:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    addw a2, a2, a2
@@ -620,24 +620,24 @@ define void @surh(i16* %a, i32 %b, i16 %c) {
 ; RV64XTHEADMEMIDX-NEXT:    ret
   %1 = zext i32 %b to i64
   %2 = add i16 %c, %c
-  %3 = getelementptr i16, i16* %a, i64 %1
-  store i16 %2, i16* %3, align 2
+  %3 = getelementptr i16, ptr %a, i64 %1
+  store i16 %2, ptr %3, align 2
   ret void
 }
 
-define void @srw(i32* %a, i64 %b, i32 %c) {
+define void @srw(ptr %a, i64 %b, i32 %c) {
 ; RV64XTHEADMEMIDX-LABEL: srw:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    addw a2, a2, a2
 ; RV64XTHEADMEMIDX-NEXT:    th.srw a2, a0, a1, 2
 ; RV64XTHEADMEMIDX-NEXT:    ret
   %1 = add i32 %c, %c
-  %2 = getelementptr i32, i32* %a, i64 %b
-  store i32 %1, i32* %2, align 4
+  %2 = getelementptr i32, ptr %a, i64 %b
+  store i32 %1, ptr %2, align 4
   ret void
 }
 
-define void @surw(i32* %a, i32 %b, i32 %c) {
+define void @surw(ptr %a, i32 %b, i32 %c) {
 ; RV64XTHEADMEMIDX-LABEL: surw:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    addw a2, a2, a2
@@ -645,24 +645,24 @@ define void @surw(i32* %a, i32 %b, i32 %c) {
 ; RV64XTHEADMEMIDX-NEXT:    ret
   %1 = zext i32 %b to i64
   %2 = add i32 %c, %c
-  %3 = getelementptr i32, i32* %a, i64 %1
-  store i32 %2, i32* %3, align 4
+  %3 = getelementptr i32, ptr %a, i64 %1
+  store i32 %2, ptr %3, align 4
   ret void
 }
 
-define void @srd(i64* %a, i64 %b, i64 %c) {
+define void @srd(ptr %a, i64 %b, i64 %c) {
 ; RV64XTHEADMEMIDX-LABEL: srd:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    add a2, a2, a2
 ; RV64XTHEADMEMIDX-NEXT:    th.srd a2, a0, a1, 3
 ; RV64XTHEADMEMIDX-NEXT:    ret
   %1 = add i64 %c, %c
-  %2 = getelementptr i64, i64* %a, i64 %b
-  store i64 %1, i64* %2, align 8
+  %2 = getelementptr i64, ptr %a, i64 %b
+  store i64 %1, ptr %2, align 8
   ret void
 }
 
-define void @surd(i64* %a, i32 %b, i64 %c) {
+define void @surd(ptr %a, i32 %b, i64 %c) {
 ; RV64XTHEADMEMIDX-LABEL: surd:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    add a2, a2, a2
@@ -670,24 +670,24 @@ define void @surd(i64* %a, i32 %b, i64 %c) {
 ; RV64XTHEADMEMIDX-NEXT:    ret
   %1 = zext i32 %b to i64
   %2 = add i64 %c, %c
-  %3 = getelementptr i64, i64* %a, i64 %1
-  store i64 %2, i64* %3, align 8
+  %3 = getelementptr i64, ptr %a, i64 %1
+  store i64 %2, ptr %3, align 8
   ret void
 }
 
-define i32* @test_simm5(i32* %base, i32 %a, i32 %b) {
+define ptr @test_simm5(ptr %base, i32 %a, i32 %b) {
 ; RV64XTHEADMEMIDX-LABEL: test_simm5:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    addw a1, a1, a2
 ; RV64XTHEADMEMIDX-NEXT:    th.swia a1, (a0), -12, 2
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %addr.1 = getelementptr i32, i32* %base, i32 -12
+  %addr.1 = getelementptr i32, ptr %base, i32 -12
   %res = add i32 %a, %b
-  store i32 %res, i32* %base
-  ret i32* %addr.1
+  store i32 %res, ptr %base
+  ret ptr %addr.1
 }
 
-define i64 @lrd_large_shift(i64* %a, i64 %b) {
+define i64 @lrd_large_shift(ptr %a, i64 %b) {
 ; RV64XTHEADMEMIDX-LABEL: lrd_large_shift:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    slli a1, a1, 5
@@ -696,12 +696,12 @@ define i64 @lrd_large_shift(i64* %a, i64 %b) {
 ; RV64XTHEADMEMIDX-NEXT:    ret
   %1 = add i64 %b, 12
   %2 = shl i64 %1, 2
-  %3 = getelementptr i64, i64* %a, i64 %2
-  %4 = load i64, i64* %3, align 8
+  %3 = getelementptr i64, ptr %a, i64 %2
+  %4 = load i64, ptr %3, align 8
   ret i64 %4
 }
 
-define i64 @lrd_large_offset(i64* %a, i64 %b) {
+define i64 @lrd_large_offset(ptr %a, i64 %b) {
 ; RV64XTHEADMEMIDX-LABEL: lrd_large_offset:
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    slli a1, a1, 3
@@ -711,7 +711,7 @@ define i64 @lrd_large_offset(i64* %a, i64 %b) {
 ; RV64XTHEADMEMIDX-NEXT:    ld a0, 1792(a0)
 ; RV64XTHEADMEMIDX-NEXT:    ret
   %1 = add i64 %b, 12000
-  %2 = getelementptr i64, i64* %a, i64 %1
-  %3 = load i64, i64* %2, align 8
+  %2 = getelementptr i64, ptr %a, i64 %1
+  %3 = load i64, ptr %2, align 8
   ret i64 %3
 }

diff  --git a/llvm/test/CodeGen/RISCV/rv64-patchpoint.ll b/llvm/test/CodeGen/RISCV/rv64-patchpoint.ll
index d2a3bccfef7bb..51c2ae908e842 100644
--- a/llvm/test/CodeGen/RISCV/rv64-patchpoint.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-patchpoint.ll
@@ -14,10 +14,10 @@ define void @small_patchpoint_codegen(i64 %p1, i64 %p2, i64 %p3, i64 %p4) {
 ; CHECK-NEXT:    nop
 ; CHECK-NEXT:    ret
 entry:
-  %result = tail call i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 5, i32 20, i8* null, i32 2, i64 %p1, i64 %p2)
+  %result = tail call i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 5, i32 20, ptr null, i32 2, i64 %p1, i64 %p2)
   ret void
 }
 
 declare void @llvm.experimental.stackmap(i64, i32, ...)
-declare void @llvm.experimental.patchpoint.void(i64, i32, i8*, i32, ...)
-declare i64 @llvm.experimental.patchpoint.i64(i64, i32, i8*, i32, ...)
+declare void @llvm.experimental.patchpoint.void(i64, i32, ptr, i32, ...)
+declare i64 @llvm.experimental.patchpoint.i64(i64, i32, ptr, i32, ...)

diff  --git a/llvm/test/CodeGen/RISCV/rv64-stackmap-frame-setup.ll b/llvm/test/CodeGen/RISCV/rv64-stackmap-frame-setup.ll
index e1bed39a500bd..1bba9cbfd03c4 100644
--- a/llvm/test/CodeGen/RISCV/rv64-stackmap-frame-setup.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-stackmap-frame-setup.ll
@@ -12,10 +12,10 @@ define void @caller_meta_leaf() {
   ; ISEL-NEXT:   PseudoRET
 entry:
   %metadata = alloca i64, i32 3, align 8
-  store i64 11, i64* %metadata
-  store i64 12, i64* %metadata
-  store i64 13, i64* %metadata
-  call void (i64, i32, ...) @llvm.experimental.stackmap(i64 4, i32 0, i64* %metadata)
+  store i64 11, ptr %metadata
+  store i64 12, ptr %metadata
+  store i64 13, ptr %metadata
+  call void (i64, i32, ...) @llvm.experimental.stackmap(i64 4, i32 0, ptr %metadata)
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/RISCV/rv64-stackmap.ll b/llvm/test/CodeGen/RISCV/rv64-stackmap.ll
index e1dde97337314..d07f608bf7893 100644
--- a/llvm/test/CodeGen/RISCV/rv64-stackmap.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-stackmap.ll
@@ -92,8 +92,8 @@
 
 define void @constantargs() {
 entry:
-  %0 = inttoptr i64 244837814094590 to i8*
-  tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 1, i32 28, i8* %0, i32 0, i64 65535, i64 65536, i64 4294967295, i64 4294967296)
+  %0 = inttoptr i64 244837814094590 to ptr
+  tail call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 1, i32 28, ptr %0, i32 0, i64 65535, i64 65536, i64 4294967295, i64 4294967296)
   ret void
 }
 
@@ -117,7 +117,7 @@ entry:
 define void @osrinline(i64 %a, i64 %b) {
 entry:
   ; Runtime void->void call.
-  call void inttoptr (i64 244837814094590 to void ()*)()
+  call void inttoptr (i64 244837814094590 to ptr)()
   ; Followed by inline OSR patchpoint with 12-byte shadow and 2 live vars.
   call void (i64, i32, ...) @llvm.experimental.stackmap(i64 3, i32 12, i64 %a, i64 %b)
   ret void
@@ -148,8 +148,8 @@ entry:
   br i1 %test, label %ret, label %cold
 cold:
   ; OSR patchpoint with 28-byte nop-slide and 2 live vars.
-  %thunk = inttoptr i64 244837814094590 to i8*
-  call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 4, i32 28, i8* %thunk, i32 0, i64 %a, i64 %b)
+  %thunk = inttoptr i64 244837814094590 to ptr
+  call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 4, i32 28, ptr %thunk, i32 0, i64 %a, i64 %b)
   unreachable
 ret:
   ret void
@@ -171,10 +171,10 @@ ret:
 ; CHECK-NEXT:   .half   {{[0-9]+}}
 ; CHECK-NEXT:   .half   0
 ; CHECK-NEXT:   .word   0
-define i64 @propertyRead(i64* %obj) {
+define i64 @propertyRead(ptr %obj) {
 entry:
-  %resolveRead = inttoptr i64 244837814094590 to i8*
-  %result = call anyregcc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 5, i32 28, i8* %resolveRead, i32 1, i64* %obj)
+  %resolveRead = inttoptr i64 244837814094590 to ptr
+  %result = call anyregcc i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 5, i32 28, ptr %resolveRead, i32 1, ptr %obj)
   %add = add i64 %result, 3
   ret i64 %add
 }
@@ -195,10 +195,10 @@ entry:
 ; CHECK-NEXT:   .half   {{[0-9]+}}
 ; CHECK-NEXT:   .half   0
 ; CHECK-NEXT:   .word   0
-define void @propertyWrite(i64 %dummy1, i64* %obj, i64 %dummy2, i64 %a) {
+define void @propertyWrite(i64 %dummy1, ptr %obj, i64 %dummy2, i64 %a) {
 entry:
-  %resolveWrite = inttoptr i64 244837814094590 to i8*
-  call anyregcc void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 6, i32 28, i8* %resolveWrite, i32 2, i64* %obj, i64 %a)
+  %resolveWrite = inttoptr i64 244837814094590 to ptr
+  call anyregcc void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 6, i32 28, ptr %resolveWrite, i32 2, ptr %obj, i64 %a)
   ret void
 }
 
@@ -221,10 +221,10 @@ entry:
 ; CHECK-NEXT:   .half   {{[0-9]+}}
 ; CHECK-NEXT:   .half   0
 ; CHECK-NEXT:   .word   0
-define void @jsVoidCall(i64 %dummy1, i64* %obj, i64 %arg, i64 %l1, i64 %l2) {
+define void @jsVoidCall(i64 %dummy1, ptr %obj, i64 %arg, i64 %l1, i64 %l2) {
 entry:
-  %resolveCall = inttoptr i64 244837814094590 to i8*
-  call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 7, i32 28, i8* %resolveCall, i32 2, i64* %obj, i64 %arg, i64 %l1, i64 %l2)
+  %resolveCall = inttoptr i64 244837814094590 to ptr
+  call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 7, i32 28, ptr %resolveCall, i32 2, ptr %obj, i64 %arg, i64 %l1, i64 %l2)
   ret void
 }
 
@@ -247,10 +247,10 @@ entry:
 ; CHECK-NEXT:   .half   {{[0-9]+}}
 ; CHECK-NEXT:   .half   0
 ; CHECK-NEXT:   .word   0
-define i64 @jsIntCall(i64 %dummy1, i64* %obj, i64 %arg, i64 %l1, i64 %l2) {
+define i64 @jsIntCall(i64 %dummy1, ptr %obj, i64 %arg, i64 %l1, i64 %l2) {
 entry:
-  %resolveCall = inttoptr i64 244837814094590 to i8*
-  %result = call i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 8, i32 28, i8* %resolveCall, i32 2, i64* %obj, i64 %arg, i64 %l1, i64 %l2)
+  %resolveCall = inttoptr i64 244837814094590 to ptr
+  %result = call i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 8, i32 28, ptr %resolveCall, i32 2, ptr %obj, i64 %arg, i64 %l1, i64 %l2)
   %add = add i64 %result, 3
   ret i64 %add
 }
@@ -292,7 +292,7 @@ define void @liveConstant() {
 ; CHECK-NEXT:   .word
 define void @spilledValue(i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16, i64 %l17, i64 %l18, i64 %l19, i64 %l20, i64 %l21, i64 %l22, i64 %l23, i64 %l24, i64 %l25, i64 %l26, i64 %l27) {
 entry:
-  call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 11, i32 28, i8* null, i32 5, i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16, i64 %l17, i64 %l18, i64 %l19, i64 %l20, i64 %l21, i64 %l22, i64 %l23, i64 %l24, i64 %l25, i64 %l26, i64 %l27)
+  call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 11, i32 28, ptr null, i32 5, i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16, i64 %l17, i64 %l18, i64 %l19, i64 %l20, i64 %l21, i64 %l22, i64 %l23, i64 %l24, i64 %l25, i64 %l26, i64 %l27)
   ret void
 }
 
@@ -333,13 +333,13 @@ entry:
 define void @directFrameIdx() {
 entry:
   %metadata1 = alloca i64, i32 3, align 8
-  store i64 11, i64* %metadata1
-  store i64 12, i64* %metadata1
-  store i64 13, i64* %metadata1
-  call void (i64, i32, ...) @llvm.experimental.stackmap(i64 16, i32 0, i64* %metadata1)
+  store i64 11, ptr %metadata1
+  store i64 12, ptr %metadata1
+  store i64 13, ptr %metadata1
+  call void (i64, i32, ...) @llvm.experimental.stackmap(i64 16, i32 0, ptr %metadata1)
   %metadata2 = alloca i8, i32 4, align 8
   %metadata3 = alloca i16, i32 4, align 8
-  call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 17, i32 4, i8* null, i32 0, i8* %metadata2, i16* %metadata3)
+  call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 17, i32 4, ptr null, i32 0, ptr %metadata2, ptr %metadata3)
   ret void
 }
 
@@ -355,10 +355,10 @@ entry:
 ; CHECK-LABEL:  .word   .L{{.*}}-longid
 define void @longid() {
 entry:
-  tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 4294967295, i32 0, i8* null, i32 0)
-  tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 4294967296, i32 0, i8* null, i32 0)
-  tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 9223372036854775807, i32 0, i8* null, i32 0)
-  tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 -1, i32 0, i8* null, i32 0)
+  tail call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 4294967295, i32 0, ptr null, i32 0)
+  tail call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 4294967296, i32 0, ptr null, i32 0)
+  tail call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 9223372036854775807, i32 0, ptr null, i32 0)
+  tail call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 -1, i32 0, ptr null, i32 0)
   ret void
 }
 
@@ -370,7 +370,7 @@ entry:
 ; CHECK-NEXT:   .half   0
 define void @needsStackRealignment() {
   %val = alloca i64, i32 3, align 128
-  tail call void (...) @escape_values(i64* %val)
+  tail call void (...) @escape_values(ptr %val)
 ; Note: Adding any non-constant to the stackmap would fail because we
 ; expected to be able to address off the frame pointer.  In a realigned
 ; frame, we must use the stack pointer instead.  This is a separate bug.
@@ -380,5 +380,5 @@ define void @needsStackRealignment() {
 declare void @escape_values(...)
 
 declare void @llvm.experimental.stackmap(i64, i32, ...)
-declare void @llvm.experimental.patchpoint.void(i64, i32, i8*, i32, ...)
-declare i64 @llvm.experimental.patchpoint.i64(i64, i32, i8*, i32, ...)
+declare void @llvm.experimental.patchpoint.void(i64, i32, ptr, i32, ...)
+declare i64 @llvm.experimental.patchpoint.i64(i64, i32, ptr, i32, ...)

diff  --git a/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll b/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll
index 2386e3a243575..e578aada5a9cf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll
@@ -15,7 +15,7 @@ define <vscale x 1 x i64> @access_fixed_object(ptr %val) {
 ; RV64IV-NEXT:    ret
   %local = alloca i64
   %array = alloca [64 x i64]
-  %v = load <vscale x 1 x i64>, <vscale x 1 x i64>* %array
+  %v = load <vscale x 1 x i64>, ptr %array
   %len = load i64, ptr %local
   store i64 %len, ptr %val
   ret <vscale x 1 x i64> %v
@@ -51,8 +51,8 @@ define <vscale x 1 x i64> @access_fixed_and_vector_objects(ptr %val) {
   %local = alloca i64
   %vector = alloca <vscale x 1 x i64>
   %array = alloca [64 x i64]
-  %v1 = load <vscale x 1 x i64>, <vscale x 1 x i64>* %array
-  %v2 = load <vscale x 1 x i64>, <vscale x 1 x i64>* %vector
+  %v1 = load <vscale x 1 x i64>, ptr %array
+  %v2 = load <vscale x 1 x i64>, ptr %vector
   %len = load i64, ptr %local
 
   %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64(

diff  --git a/llvm/test/CodeGen/RISCV/rvv/addi-rvv-stack-object.mir b/llvm/test/CodeGen/RISCV/rvv/addi-rvv-stack-object.mir
index 83fc1fc994cfa..5255728821039 100644
--- a/llvm/test/CodeGen/RISCV/rvv/addi-rvv-stack-object.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/addi-rvv-stack-object.mir
@@ -1,10 +1,10 @@
 # RUN: llc -mtriple=riscv64 -mattr=+v -run-pass prologepilog %s -o - | FileCheck %s
 --- |
-  declare void @extern(<vscale x 16 x i8>*) #0
+  declare void @extern(ptr) #0
   
   define void @addi_rvv_stack_object() #0 {
     %local0 = alloca <vscale x 16 x i8>, align 16
-    call void @extern(<vscale x 16 x i8>* %local0)
+    call void @extern(ptr %local0)
     ret void
   }
   

diff  --git a/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir b/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir
index de941b8739a81..a54da97d2548a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir
@@ -3,7 +3,7 @@
 
 --- |
   define void @add_scalable_offset(
-            <vscale x 1 x i64> *%pa,
+            ptr %pa,
             i64 %vl)
   {
     ret void

diff  --git a/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll b/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll
index 4143ea25f2bba..a9a680d54d589 100644
--- a/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll
@@ -6,7 +6,7 @@ target triple = "riscv64-unknown-unknown-elf"
 
 %struct.test = type { <vscale x 1 x double>, <vscale x 1 x double> }
 
-define <vscale x 1 x double> @test(%struct.test* %addr, i64 %vl) {
+define <vscale x 1 x double> @test(ptr %addr, i64 %vl) {
 ; CHECK-LABEL: test:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addi sp, sp, -16
@@ -34,9 +34,9 @@ define <vscale x 1 x double> @test(%struct.test* %addr, i64 %vl) {
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
   %ret = alloca %struct.test, align 8
-  %val = load %struct.test, %struct.test* %addr
-  store %struct.test %val, %struct.test* %ret, align 8
-  %0 = load %struct.test, %struct.test* %ret, align 8
+  %val = load %struct.test, ptr %addr
+  store %struct.test %val, ptr %ret, align 8
+  %0 = load %struct.test, ptr %ret, align 8
   %1 = extractvalue %struct.test %0, 0
   %2 = extractvalue %struct.test %0, 1
   %3 = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64(

diff  --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
index 661b79141fee5..187f758b78020 100644
--- a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
@@ -4,35 +4,35 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64
 
-define fastcc <vscale x 4 x i8> @ret_nxv4i8(<vscale x 4 x i8>* %p) {
+define fastcc <vscale x 4 x i8> @ret_nxv4i8(ptr %p) {
 ; CHECK-LABEL: ret_nxv4i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %v = load <vscale x 4 x i8>, <vscale x 4 x i8>* %p
+  %v = load <vscale x 4 x i8>, ptr %p
   ret <vscale x 4 x i8> %v
 }
 
-define fastcc <vscale x 4 x i32> @ret_nxv4i32(<vscale x 4 x i32>* %p) {
+define fastcc <vscale x 4 x i32> @ret_nxv4i32(ptr %p) {
 ; CHECK-LABEL: ret_nxv4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl2re32.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %v = load <vscale x 4 x i32>, <vscale x 4 x i32>* %p
+  %v = load <vscale x 4 x i32>, ptr %p
   ret <vscale x 4 x i32> %v
 }
 
-define fastcc <vscale x 8 x i32> @ret_nxv8i32(<vscale x 8 x i32>* %p) {
+define fastcc <vscale x 8 x i32> @ret_nxv8i32(ptr %p) {
 ; CHECK-LABEL: ret_nxv8i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl4re32.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %v = load <vscale x 8 x i32>, <vscale x 8 x i32>* %p
+  %v = load <vscale x 8 x i32>, ptr %p
   ret <vscale x 8 x i32> %v
 }
 
-define fastcc <vscale x 16 x i64> @ret_nxv16i64(<vscale x 16 x i64>* %p) {
+define fastcc <vscale x 16 x i64> @ret_nxv16i64(ptr %p) {
 ; CHECK-LABEL: ret_nxv16i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a1, vlenb
@@ -41,32 +41,32 @@ define fastcc <vscale x 16 x i64> @ret_nxv16i64(<vscale x 16 x i64>* %p) {
 ; CHECK-NEXT:    vl8re64.v v16, (a1)
 ; CHECK-NEXT:    vl8re64.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %v = load <vscale x 16 x i64>, <vscale x 16 x i64>* %p
+  %v = load <vscale x 16 x i64>, ptr %p
   ret <vscale x 16 x i64> %v
 }
 
-define fastcc <vscale x 8 x i1> @ret_mask_nxv8i1(<vscale x 8 x i1>* %p) {
+define fastcc <vscale x 8 x i1> @ret_mask_nxv8i1(ptr %p) {
 ; CHECK-LABEL: ret_mask_nxv8i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vlm.v v0, (a0)
 ; CHECK-NEXT:    ret
-  %v = load <vscale x 8 x i1>, <vscale x 8 x i1>* %p
+  %v = load <vscale x 8 x i1>, ptr %p
   ret <vscale x 8 x i1> %v
 }
 
-define fastcc <vscale x 32 x i1> @ret_mask_nxv32i1(<vscale x 32 x i1>* %p) {
+define fastcc <vscale x 32 x i1> @ret_mask_nxv32i1(ptr %p) {
 ; CHECK-LABEL: ret_mask_nxv32i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
 ; CHECK-NEXT:    vlm.v v0, (a0)
 ; CHECK-NEXT:    ret
-  %v = load <vscale x 32 x i1>, <vscale x 32 x i1>* %p
+  %v = load <vscale x 32 x i1>, ptr %p
   ret <vscale x 32 x i1> %v
 }
 
 ; Return the vector via registers v8-v23
-define fastcc <vscale x 64 x i32> @ret_split_nxv64i32(<vscale x 64 x i32>* %x) {
+define fastcc <vscale x 64 x i32> @ret_split_nxv64i32(ptr %x) {
 ; CHECK-LABEL: ret_split_nxv64i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a2, vlenb
@@ -89,12 +89,12 @@ define fastcc <vscale x 64 x i32> @ret_split_nxv64i32(<vscale x 64 x i32>* %x) {
 ; CHECK-NEXT:    add a0, a0, a3
 ; CHECK-NEXT:    vs8r.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %v = load <vscale x 64 x i32>, <vscale x 64 x i32>* %x
+  %v = load <vscale x 64 x i32>, ptr %x
   ret <vscale x 64 x i32> %v
 }
 
 ; Return the vector fully via the stack
-define fastcc <vscale x 128 x i32> @ret_split_nxv128i32(<vscale x 128 x i32>* %x) {
+define fastcc <vscale x 128 x i32> @ret_split_nxv128i32(ptr %x) {
 ; CHECK-LABEL: ret_split_nxv128i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi sp, sp, -16
@@ -186,7 +186,7 @@ define fastcc <vscale x 128 x i32> @ret_split_nxv128i32(<vscale x 128 x i32>* %x
 ; CHECK-NEXT:    add sp, sp, a0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
-  %v = load <vscale x 128 x i32>, <vscale x 128 x i32>* %x
+  %v = load <vscale x 128 x i32>, ptr %x
   ret <vscale x 128 x i32> %v
 }
 

diff  --git a/llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll
index d27bc236fb4d8..dc4d28819bbbd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll
@@ -14,7 +14,7 @@
 ; limits performance.
 ; FIXME: This is potentially bad for register pressure. Need a better heuristic.
 
-define void @sink_splat_add(i32* nocapture %a, i32 signext %x) {
+define void @sink_splat_add(ptr nocapture %a, i32 signext %x) {
 ; NO-SINK-LABEL: sink_splat_add:
 ; NO-SINK:       # %bb.0: # %entry
 ; NO-SINK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -67,12 +67,12 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i32, i32* %a, i64 %index
-  %1 = bitcast i32* %0 to <4 x i32>*
-  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
+  %0 = getelementptr inbounds i32, ptr %a, i64 %index
+  %1 = bitcast ptr %0 to ptr
+  %wide.load = load <4 x i32>, ptr %1, align 4
   %2 = add <4 x i32> %wide.load, %broadcast.splat
-  %3 = bitcast i32* %0 to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
+  %3 = bitcast ptr %0 to ptr
+  store <4 x i32> %2, ptr %3, align 4
   %index.next = add nuw i64 %index, 4
   %4 = icmp eq i64 %index.next, 1024
   br i1 %4, label %for.cond.cleanup, label %vector.body
@@ -83,7 +83,7 @@ for.cond.cleanup:                                 ; preds = %vector.body
 
 declare i64 @llvm.vscale.i64()
 
-define void @sink_splat_add_scalable(i32* nocapture %a, i32 signext %x) {
+define void @sink_splat_add_scalable(ptr nocapture %a, i32 signext %x) {
 ; NO-SINK-LABEL: sink_splat_add_scalable:
 ; NO-SINK:       # %bb.0: # %entry
 ; NO-SINK-NEXT:    csrr a5, vlenb
@@ -229,12 +229,12 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %6 = getelementptr inbounds i32, i32* %a, i64 %index
-  %7 = bitcast i32* %6 to <vscale x 4 x i32>*
-  %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %7, align 4
+  %6 = getelementptr inbounds i32, ptr %a, i64 %index
+  %7 = bitcast ptr %6 to ptr
+  %wide.load = load <vscale x 4 x i32>, ptr %7, align 4
   %8 = add <vscale x 4 x i32> %wide.load, %broadcast.splat
-  %9 = bitcast i32* %6 to <vscale x 4 x i32>*
-  store <vscale x 4 x i32> %8, <vscale x 4 x i32>* %9, align 4
+  %9 = bitcast ptr %6 to ptr
+  store <vscale x 4 x i32> %8, ptr %9, align 4
   %index.next = add nuw i64 %index, %5
   %10 = icmp eq i64 %index.next, %n.vec
   br i1 %10, label %middle.block, label %vector.body
@@ -252,10 +252,10 @@ for.cond.cleanup:                                 ; preds = %for.body, %middle.b
 
 for.body:                                         ; preds = %for.body.preheader, %for.body
   %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
-  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
-  %11 = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+  %11 = load i32, ptr %arrayidx, align 4
   %add = add i32 %11, %x
-  store i32 %add, i32* %arrayidx, align 4
+  store i32 %add, ptr %arrayidx, align 4
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
   %cmp.not = icmp eq i64 %indvars.iv.next, 1024
   br i1 %cmp.not, label %for.cond.cleanup, label %for.body
@@ -263,7 +263,7 @@ for.body:                                         ; preds = %for.body.preheader,
 
 declare <4 x i32> @llvm.vp.add.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
 
-define void @sink_splat_vp_add(i32* nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
+define void @sink_splat_vp_add(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
 ; NO-SINK-LABEL: sink_splat_vp_add:
 ; NO-SINK:       # %bb.0: # %entry
 ; NO-SINK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -322,12 +322,12 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i32, i32* %a, i64 %index
-  %1 = bitcast i32* %0 to <4 x i32>*
-  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
+  %0 = getelementptr inbounds i32, ptr %a, i64 %index
+  %1 = bitcast ptr %0 to ptr
+  %wide.load = load <4 x i32>, ptr %1, align 4
   %2 = call <4 x i32> @llvm.vp.add.v4i32(<4 x i32> %wide.load, <4 x i32> %broadcast.splat, <4 x i1> %m, i32 %vl)
-  %3 = bitcast i32* %0 to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
+  %3 = bitcast ptr %0 to ptr
+  store <4 x i32> %2, ptr %3, align 4
   %index.next = add nuw i64 %index, 4
   %4 = icmp eq i64 %index.next, 1024
   br i1 %4, label %for.cond.cleanup, label %vector.body
@@ -336,7 +336,7 @@ for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @sink_splat_fadd(float* nocapture %a, float %x) {
+define void @sink_splat_fadd(ptr nocapture %a, float %x) {
 ; NO-SINK-LABEL: sink_splat_fadd:
 ; NO-SINK:       # %bb.0: # %entry
 ; NO-SINK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -389,12 +389,12 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds float, float* %a, i64 %index
-  %1 = bitcast float* %0 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %1, align 4
+  %0 = getelementptr inbounds float, ptr %a, i64 %index
+  %1 = bitcast ptr %0 to ptr
+  %wide.load = load <4 x float>, ptr %1, align 4
   %2 = fadd <4 x float> %wide.load, %broadcast.splat
-  %3 = bitcast float* %0 to <4 x float>*
-  store <4 x float> %2, <4 x float>* %3, align 4
+  %3 = bitcast ptr %0 to ptr
+  store <4 x float> %2, ptr %3, align 4
   %index.next = add nuw i64 %index, 4
   %4 = icmp eq i64 %index.next, 1024
   br i1 %4, label %for.cond.cleanup, label %vector.body
@@ -403,7 +403,7 @@ for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @sink_splat_fadd_scalable(float* nocapture %a, float %x) {
+define void @sink_splat_fadd_scalable(ptr nocapture %a, float %x) {
 ; NO-SINK-LABEL: sink_splat_fadd_scalable:
 ; NO-SINK:       # %bb.0: # %entry
 ; NO-SINK-NEXT:    csrr a1, vlenb
@@ -546,12 +546,12 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %6 = getelementptr inbounds float, float* %a, i64 %index
-  %7 = bitcast float* %6 to <vscale x 2 x float>*
-  %wide.load = load <vscale x 2 x float>, <vscale x 2 x float>* %7, align 4
+  %6 = getelementptr inbounds float, ptr %a, i64 %index
+  %7 = bitcast ptr %6 to ptr
+  %wide.load = load <vscale x 2 x float>, ptr %7, align 4
   %8 = fadd <vscale x 2 x float> %wide.load, %broadcast.splat
-  %9 = bitcast float* %6 to <vscale x 2 x float>*
-  store <vscale x 2 x float> %8, <vscale x 2 x float>* %9, align 4
+  %9 = bitcast ptr %6 to ptr
+  store <vscale x 2 x float> %8, ptr %9, align 4
   %index.next = add nuw i64 %index, %5
   %10 = icmp eq i64 %index.next, %n.vec
   br i1 %10, label %middle.block, label %vector.body
@@ -569,10 +569,10 @@ for.cond.cleanup:                                 ; preds = %for.body, %middle.b
 
 for.body:                                         ; preds = %for.body.preheader, %for.body
   %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
-  %arrayidx = getelementptr inbounds float, float* %a, i64 %indvars.iv
-  %11 = load float, float* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds float, ptr %a, i64 %indvars.iv
+  %11 = load float, ptr %arrayidx, align 4
   %mul = fadd float %11, %x
-  store float %mul, float* %arrayidx, align 4
+  store float %mul, ptr %arrayidx, align 4
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
   %cmp.not = icmp eq i64 %indvars.iv.next, 1024
   br i1 %cmp.not, label %for.cond.cleanup, label %for.body
@@ -580,7 +580,7 @@ for.body:                                         ; preds = %for.body.preheader,
 
 declare <4 x float> @llvm.vp.fadd.v4i32(<4 x float>, <4 x float>, <4 x i1>, i32)
 
-define void @sink_splat_vp_fadd(float* nocapture %a, float %x, <4 x i1> %m, i32 zeroext %vl) {
+define void @sink_splat_vp_fadd(ptr nocapture %a, float %x, <4 x i1> %m, i32 zeroext %vl) {
 ; NO-SINK-LABEL: sink_splat_vp_fadd:
 ; NO-SINK:       # %bb.0: # %entry
 ; NO-SINK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -639,12 +639,12 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds float, float* %a, i64 %index
-  %1 = bitcast float* %0 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %1, align 4
+  %0 = getelementptr inbounds float, ptr %a, i64 %index
+  %1 = bitcast ptr %0 to ptr
+  %wide.load = load <4 x float>, ptr %1, align 4
   %2 = call <4 x float> @llvm.vp.fadd.v4i32(<4 x float> %wide.load, <4 x float> %broadcast.splat, <4 x i1> %m, i32 %vl)
-  %3 = bitcast float* %0 to <4 x float>*
-  store <4 x float> %2, <4 x float>* %3, align 4
+  %3 = bitcast ptr %0 to ptr
+  store <4 x float> %2, ptr %3, align 4
   %index.next = add nuw i64 %index, 4
   %4 = icmp eq i64 %index.next, 1024
   br i1 %4, label %for.cond.cleanup, label %vector.body

diff  --git a/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll b/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll
index b3938b8280419..dfc70299d015b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -verify-machineinstrs < %s | FileCheck %s
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -verify-machineinstrs < %s | FileCheck %s
 
-define <vscale x 1 x i8> @sextload_nxv1i1_nxv1i8(<vscale x 1 x i1>* %x) {
+define <vscale x 1 x i8> @sextload_nxv1i1_nxv1i8(ptr %x) {
 ; CHECK-LABEL: sextload_nxv1i1_nxv1i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
@@ -10,372 +10,372 @@ define <vscale x 1 x i8> @sextload_nxv1i1_nxv1i8(<vscale x 1 x i1>* %x) {
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 1 x i1>, <vscale x 1 x i1>* %x
+  %y = load <vscale x 1 x i1>, ptr %x
   %z = sext <vscale x 1 x i1> %y to <vscale x 1 x i8>
   ret <vscale x 1 x i8> %z
 }
 
-define <vscale x 1 x i16> @sextload_nxv1i8_nxv1i16(<vscale x 1 x i8>* %x) {
+define <vscale x 1 x i16> @sextload_nxv1i8_nxv1i16(ptr %x) {
 ; CHECK-LABEL: sextload_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vle8.v v9, (a0)
 ; CHECK-NEXT:    vsext.vf2 v8, v9
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 1 x i8>, <vscale x 1 x i8>* %x
+  %y = load <vscale x 1 x i8>, ptr %x
   %z = sext <vscale x 1 x i8> %y to <vscale x 1 x i16>
   ret <vscale x 1 x i16> %z
 }
 
-define <vscale x 1 x i16> @zextload_nxv1i8_nxv1i16(<vscale x 1 x i8>* %x) {
+define <vscale x 1 x i16> @zextload_nxv1i8_nxv1i16(ptr %x) {
 ; CHECK-LABEL: zextload_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vle8.v v9, (a0)
 ; CHECK-NEXT:    vzext.vf2 v8, v9
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 1 x i8>, <vscale x 1 x i8>* %x
+  %y = load <vscale x 1 x i8>, ptr %x
   %z = zext <vscale x 1 x i8> %y to <vscale x 1 x i16>
   ret <vscale x 1 x i16> %z
 }
 
-define <vscale x 1 x i32> @sextload_nxv1i8_nxv1i32(<vscale x 1 x i8>* %x) {
+define <vscale x 1 x i32> @sextload_nxv1i8_nxv1i32(ptr %x) {
 ; CHECK-LABEL: sextload_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vle8.v v9, (a0)
 ; CHECK-NEXT:    vsext.vf4 v8, v9
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 1 x i8>, <vscale x 1 x i8>* %x
+  %y = load <vscale x 1 x i8>, ptr %x
   %z = sext <vscale x 1 x i8> %y to <vscale x 1 x i32>
   ret <vscale x 1 x i32> %z
 }
 
-define <vscale x 1 x i32> @zextload_nxv1i8_nxv1i32(<vscale x 1 x i8>* %x) {
+define <vscale x 1 x i32> @zextload_nxv1i8_nxv1i32(ptr %x) {
 ; CHECK-LABEL: zextload_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vle8.v v9, (a0)
 ; CHECK-NEXT:    vzext.vf4 v8, v9
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 1 x i8>, <vscale x 1 x i8>* %x
+  %y = load <vscale x 1 x i8>, ptr %x
   %z = zext <vscale x 1 x i8> %y to <vscale x 1 x i32>
   ret <vscale x 1 x i32> %z
 }
 
-define <vscale x 1 x i64> @sextload_nxv1i8_nxv1i64(<vscale x 1 x i8>* %x) {
+define <vscale x 1 x i64> @sextload_nxv1i8_nxv1i64(ptr %x) {
 ; CHECK-LABEL: sextload_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vle8.v v9, (a0)
 ; CHECK-NEXT:    vsext.vf8 v8, v9
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 1 x i8>, <vscale x 1 x i8>* %x
+  %y = load <vscale x 1 x i8>, ptr %x
   %z = sext <vscale x 1 x i8> %y to <vscale x 1 x i64>
   ret <vscale x 1 x i64> %z
 }
 
-define <vscale x 1 x i64> @zextload_nxv1i8_nxv1i64(<vscale x 1 x i8>* %x) {
+define <vscale x 1 x i64> @zextload_nxv1i8_nxv1i64(ptr %x) {
 ; CHECK-LABEL: zextload_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vle8.v v9, (a0)
 ; CHECK-NEXT:    vzext.vf8 v8, v9
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 1 x i8>, <vscale x 1 x i8>* %x
+  %y = load <vscale x 1 x i8>, ptr %x
   %z = zext <vscale x 1 x i8> %y to <vscale x 1 x i64>
   ret <vscale x 1 x i64> %z
 }
 
-define <vscale x 2 x i16> @sextload_nxv2i8_nxv2i16(<vscale x 2 x i8>* %x) {
+define <vscale x 2 x i16> @sextload_nxv2i8_nxv2i16(ptr %x) {
 ; CHECK-LABEL: sextload_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vle8.v v9, (a0)
 ; CHECK-NEXT:    vsext.vf2 v8, v9
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 2 x i8>, <vscale x 2 x i8>* %x
+  %y = load <vscale x 2 x i8>, ptr %x
   %z = sext <vscale x 2 x i8> %y to <vscale x 2 x i16>
   ret <vscale x 2 x i16> %z
 }
 
-define <vscale x 2 x i16> @zextload_nxv2i8_nxv2i16(<vscale x 2 x i8>* %x) {
+define <vscale x 2 x i16> @zextload_nxv2i8_nxv2i16(ptr %x) {
 ; CHECK-LABEL: zextload_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vle8.v v9, (a0)
 ; CHECK-NEXT:    vzext.vf2 v8, v9
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 2 x i8>, <vscale x 2 x i8>* %x
+  %y = load <vscale x 2 x i8>, ptr %x
   %z = zext <vscale x 2 x i8> %y to <vscale x 2 x i16>
   ret <vscale x 2 x i16> %z
 }
 
-define <vscale x 2 x i32> @sextload_nxv2i8_nxv2i32(<vscale x 2 x i8>* %x) {
+define <vscale x 2 x i32> @sextload_nxv2i8_nxv2i32(ptr %x) {
 ; CHECK-LABEL: sextload_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vle8.v v9, (a0)
 ; CHECK-NEXT:    vsext.vf4 v8, v9
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 2 x i8>, <vscale x 2 x i8>* %x
+  %y = load <vscale x 2 x i8>, ptr %x
   %z = sext <vscale x 2 x i8> %y to <vscale x 2 x i32>
   ret <vscale x 2 x i32> %z
 }
 
-define <vscale x 2 x i32> @zextload_nxv2i8_nxv2i32(<vscale x 2 x i8>* %x) {
+define <vscale x 2 x i32> @zextload_nxv2i8_nxv2i32(ptr %x) {
 ; CHECK-LABEL: zextload_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vle8.v v9, (a0)
 ; CHECK-NEXT:    vzext.vf4 v8, v9
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 2 x i8>, <vscale x 2 x i8>* %x
+  %y = load <vscale x 2 x i8>, ptr %x
   %z = zext <vscale x 2 x i8> %y to <vscale x 2 x i32>
   ret <vscale x 2 x i32> %z
 }
 
-define <vscale x 2 x i64> @sextload_nxv2i8_nxv2i64(<vscale x 2 x i8>* %x) {
+define <vscale x 2 x i64> @sextload_nxv2i8_nxv2i64(ptr %x) {
 ; CHECK-LABEL: sextload_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vle8.v v10, (a0)
 ; CHECK-NEXT:    vsext.vf8 v8, v10
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 2 x i8>, <vscale x 2 x i8>* %x
+  %y = load <vscale x 2 x i8>, ptr %x
   %z = sext <vscale x 2 x i8> %y to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %z
 }
 
-define <vscale x 2 x i64> @zextload_nxv2i8_nxv2i64(<vscale x 2 x i8>* %x) {
+define <vscale x 2 x i64> @zextload_nxv2i8_nxv2i64(ptr %x) {
 ; CHECK-LABEL: zextload_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vle8.v v10, (a0)
 ; CHECK-NEXT:    vzext.vf8 v8, v10
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 2 x i8>, <vscale x 2 x i8>* %x
+  %y = load <vscale x 2 x i8>, ptr %x
   %z = zext <vscale x 2 x i8> %y to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %z
 }
 
-define <vscale x 4 x i16> @sextload_nxv4i8_nxv4i16(<vscale x 4 x i8>* %x) {
+define <vscale x 4 x i16> @sextload_nxv4i8_nxv4i16(ptr %x) {
 ; CHECK-LABEL: sextload_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vle8.v v9, (a0)
 ; CHECK-NEXT:    vsext.vf2 v8, v9
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 4 x i8>, <vscale x 4 x i8>* %x
+  %y = load <vscale x 4 x i8>, ptr %x
   %z = sext <vscale x 4 x i8> %y to <vscale x 4 x i16>
   ret <vscale x 4 x i16> %z
 }
 
-define <vscale x 4 x i16> @zextload_nxv4i8_nxv4i16(<vscale x 4 x i8>* %x) {
+define <vscale x 4 x i16> @zextload_nxv4i8_nxv4i16(ptr %x) {
 ; CHECK-LABEL: zextload_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vle8.v v9, (a0)
 ; CHECK-NEXT:    vzext.vf2 v8, v9
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 4 x i8>, <vscale x 4 x i8>* %x
+  %y = load <vscale x 4 x i8>, ptr %x
   %z = zext <vscale x 4 x i8> %y to <vscale x 4 x i16>
   ret <vscale x 4 x i16> %z
 }
 
-define <vscale x 4 x i32> @sextload_nxv4i8_nxv4i32(<vscale x 4 x i8>* %x) {
+define <vscale x 4 x i32> @sextload_nxv4i8_nxv4i32(ptr %x) {
 ; CHECK-LABEL: sextload_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vle8.v v10, (a0)
 ; CHECK-NEXT:    vsext.vf4 v8, v10
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 4 x i8>, <vscale x 4 x i8>* %x
+  %y = load <vscale x 4 x i8>, ptr %x
   %z = sext <vscale x 4 x i8> %y to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %z
 }
 
-define <vscale x 4 x i32> @zextload_nxv4i8_nxv4i32(<vscale x 4 x i8>* %x) {
+define <vscale x 4 x i32> @zextload_nxv4i8_nxv4i32(ptr %x) {
 ; CHECK-LABEL: zextload_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vle8.v v10, (a0)
 ; CHECK-NEXT:    vzext.vf4 v8, v10
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 4 x i8>, <vscale x 4 x i8>* %x
+  %y = load <vscale x 4 x i8>, ptr %x
   %z = zext <vscale x 4 x i8> %y to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %z
 }
 
-define <vscale x 4 x i64> @sextload_nxv4i8_nxv4i64(<vscale x 4 x i8>* %x) {
+define <vscale x 4 x i64> @sextload_nxv4i8_nxv4i64(ptr %x) {
 ; CHECK-LABEL: sextload_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vle8.v v12, (a0)
 ; CHECK-NEXT:    vsext.vf8 v8, v12
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 4 x i8>, <vscale x 4 x i8>* %x
+  %y = load <vscale x 4 x i8>, ptr %x
   %z = sext <vscale x 4 x i8> %y to <vscale x 4 x i64>
   ret <vscale x 4 x i64> %z
 }
 
-define <vscale x 4 x i64> @zextload_nxv4i8_nxv4i64(<vscale x 4 x i8>* %x) {
+define <vscale x 4 x i64> @zextload_nxv4i8_nxv4i64(ptr %x) {
 ; CHECK-LABEL: zextload_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vle8.v v12, (a0)
 ; CHECK-NEXT:    vzext.vf8 v8, v12
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 4 x i8>, <vscale x 4 x i8>* %x
+  %y = load <vscale x 4 x i8>, ptr %x
   %z = zext <vscale x 4 x i8> %y to <vscale x 4 x i64>
   ret <vscale x 4 x i64> %z
 }
 
-define <vscale x 8 x i16> @sextload_nxv8i8_nxv8i16(<vscale x 8 x i8>* %x) {
+define <vscale x 8 x i16> @sextload_nxv8i8_nxv8i16(ptr %x) {
 ; CHECK-LABEL: sextload_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl1r.v v10, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vsext.vf2 v8, v10
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 8 x i8>, <vscale x 8 x i8>* %x
+  %y = load <vscale x 8 x i8>, ptr %x
   %z = sext <vscale x 8 x i8> %y to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %z
 }
 
-define <vscale x 8 x i16> @zextload_nxv8i8_nxv8i16(<vscale x 8 x i8>* %x) {
+define <vscale x 8 x i16> @zextload_nxv8i8_nxv8i16(ptr %x) {
 ; CHECK-LABEL: zextload_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl1r.v v10, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v8, v10
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 8 x i8>, <vscale x 8 x i8>* %x
+  %y = load <vscale x 8 x i8>, ptr %x
   %z = zext <vscale x 8 x i8> %y to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %z
 }
 
-define <vscale x 8 x i32> @sextload_nxv8i8_nxv8i32(<vscale x 8 x i8>* %x) {
+define <vscale x 8 x i32> @sextload_nxv8i8_nxv8i32(ptr %x) {
 ; CHECK-LABEL: sextload_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl1r.v v12, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vsext.vf4 v8, v12
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 8 x i8>, <vscale x 8 x i8>* %x
+  %y = load <vscale x 8 x i8>, ptr %x
   %z = sext <vscale x 8 x i8> %y to <vscale x 8 x i32>
   ret <vscale x 8 x i32> %z
 }
 
-define <vscale x 8 x i32> @zextload_nxv8i8_nxv8i32(<vscale x 8 x i8>* %x) {
+define <vscale x 8 x i32> @zextload_nxv8i8_nxv8i32(ptr %x) {
 ; CHECK-LABEL: zextload_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl1r.v v12, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vzext.vf4 v8, v12
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 8 x i8>, <vscale x 8 x i8>* %x
+  %y = load <vscale x 8 x i8>, ptr %x
   %z = zext <vscale x 8 x i8> %y to <vscale x 8 x i32>
   ret <vscale x 8 x i32> %z
 }
 
-define <vscale x 8 x i64> @sextload_nxv8i8_nxv8i64(<vscale x 8 x i8>* %x) {
+define <vscale x 8 x i64> @sextload_nxv8i8_nxv8i64(ptr %x) {
 ; CHECK-LABEL: sextload_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl1r.v v16, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vsext.vf8 v8, v16
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 8 x i8>, <vscale x 8 x i8>* %x
+  %y = load <vscale x 8 x i8>, ptr %x
   %z = sext <vscale x 8 x i8> %y to <vscale x 8 x i64>
   ret <vscale x 8 x i64> %z
 }
 
-define <vscale x 8 x i64> @zextload_nxv8i8_nxv8i64(<vscale x 8 x i8>* %x) {
+define <vscale x 8 x i64> @zextload_nxv8i8_nxv8i64(ptr %x) {
 ; CHECK-LABEL: zextload_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl1r.v v16, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vzext.vf8 v8, v16
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 8 x i8>, <vscale x 8 x i8>* %x
+  %y = load <vscale x 8 x i8>, ptr %x
   %z = zext <vscale x 8 x i8> %y to <vscale x 8 x i64>
   ret <vscale x 8 x i64> %z
 }
 
-define <vscale x 16 x i16> @sextload_nxv16i8_nxv16i16(<vscale x 16 x i8>* %x) {
+define <vscale x 16 x i16> @sextload_nxv16i8_nxv16i16(ptr %x) {
 ; CHECK-LABEL: sextload_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl2r.v v12, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vsext.vf2 v8, v12
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 16 x i8>, <vscale x 16 x i8>* %x
+  %y = load <vscale x 16 x i8>, ptr %x
   %z = sext <vscale x 16 x i8> %y to <vscale x 16 x i16>
   ret <vscale x 16 x i16> %z
 }
 
-define <vscale x 16 x i16> @zextload_nxv16i8_nxv16i16(<vscale x 16 x i8>* %x) {
+define <vscale x 16 x i16> @zextload_nxv16i8_nxv16i16(ptr %x) {
 ; CHECK-LABEL: zextload_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl2r.v v12, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v8, v12
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 16 x i8>, <vscale x 16 x i8>* %x
+  %y = load <vscale x 16 x i8>, ptr %x
   %z = zext <vscale x 16 x i8> %y to <vscale x 16 x i16>
   ret <vscale x 16 x i16> %z
 }
 
-define <vscale x 16 x i32> @sextload_nxv16i8_nxv16i32(<vscale x 16 x i8>* %x) {
+define <vscale x 16 x i32> @sextload_nxv16i8_nxv16i32(ptr %x) {
 ; CHECK-LABEL: sextload_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl2r.v v16, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vsext.vf4 v8, v16
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 16 x i8>, <vscale x 16 x i8>* %x
+  %y = load <vscale x 16 x i8>, ptr %x
   %z = sext <vscale x 16 x i8> %y to <vscale x 16 x i32>
   ret <vscale x 16 x i32> %z
 }
 
-define <vscale x 16 x i32> @zextload_nxv16i8_nxv16i32(<vscale x 16 x i8>* %x) {
+define <vscale x 16 x i32> @zextload_nxv16i8_nxv16i32(ptr %x) {
 ; CHECK-LABEL: zextload_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl2r.v v16, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vzext.vf4 v8, v16
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 16 x i8>, <vscale x 16 x i8>* %x
+  %y = load <vscale x 16 x i8>, ptr %x
   %z = zext <vscale x 16 x i8> %y to <vscale x 16 x i32>
   ret <vscale x 16 x i32> %z
 }
 
-define <vscale x 32 x i16> @sextload_nxv32i8_nxv32i16(<vscale x 32 x i8>* %x) {
+define <vscale x 32 x i16> @sextload_nxv32i8_nxv32i16(ptr %x) {
 ; CHECK-LABEL: sextload_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl4r.v v16, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vsext.vf2 v8, v16
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 32 x i8>, <vscale x 32 x i8>* %x
+  %y = load <vscale x 32 x i8>, ptr %x
   %z = sext <vscale x 32 x i8> %y to <vscale x 32 x i16>
   ret <vscale x 32 x i16> %z
 }
 
-define <vscale x 32 x i16> @zextload_nxv32i8_nxv32i16(<vscale x 32 x i8>* %x) {
+define <vscale x 32 x i16> @zextload_nxv32i8_nxv32i16(ptr %x) {
 ; CHECK-LABEL: zextload_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl4r.v v16, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v8, v16
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 32 x i8>, <vscale x 32 x i8>* %x
+  %y = load <vscale x 32 x i8>, ptr %x
   %z = zext <vscale x 32 x i8> %y to <vscale x 32 x i16>
   ret <vscale x 32 x i16> %z
 }
 
-define void @truncstore_nxv1i8_nxv1i1(<vscale x 1 x i8> %x, <vscale x 1 x i1> *%z) {
+define void @truncstore_nxv1i8_nxv1i1(<vscale x 1 x i8> %x, ptr %z) {
 ; CHECK-LABEL: truncstore_nxv1i8_nxv1i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
@@ -384,11 +384,11 @@ define void @truncstore_nxv1i8_nxv1i1(<vscale x 1 x i8> %x, <vscale x 1 x i1> *%
 ; CHECK-NEXT:    vsm.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 1 x i8> %x to <vscale x 1 x i1>
-  store <vscale x 1 x i1> %y, <vscale x 1 x i1>* %z
+  store <vscale x 1 x i1> %y, ptr %z
   ret void
 }
 
-define void @truncstore_nxv1i16_nxv1i8(<vscale x 1 x i16> %x, <vscale x 1 x i8>* %z) {
+define void @truncstore_nxv1i16_nxv1i8(<vscale x 1 x i16> %x, ptr %z) {
 ; CHECK-LABEL: truncstore_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
@@ -396,59 +396,59 @@ define void @truncstore_nxv1i16_nxv1i8(<vscale x 1 x i16> %x, <vscale x 1 x i8>*
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 1 x i16> %x to <vscale x 1 x i8>
-  store <vscale x 1 x i8> %y, <vscale x 1 x i8>* %z
+  store <vscale x 1 x i8> %y, ptr %z
   ret void
 }
 
-define <vscale x 1 x i32> @sextload_nxv1i16_nxv1i32(<vscale x 1 x i16>* %x) {
+define <vscale x 1 x i32> @sextload_nxv1i16_nxv1i32(ptr %x) {
 ; CHECK-LABEL: sextload_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vle16.v v9, (a0)
 ; CHECK-NEXT:    vsext.vf2 v8, v9
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 1 x i16>, <vscale x 1 x i16>* %x
+  %y = load <vscale x 1 x i16>, ptr %x
   %z = sext <vscale x 1 x i16> %y to <vscale x 1 x i32>
   ret <vscale x 1 x i32> %z
 }
 
-define <vscale x 1 x i32> @zextload_nxv1i16_nxv1i32(<vscale x 1 x i16>* %x) {
+define <vscale x 1 x i32> @zextload_nxv1i16_nxv1i32(ptr %x) {
 ; CHECK-LABEL: zextload_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vle16.v v9, (a0)
 ; CHECK-NEXT:    vzext.vf2 v8, v9
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 1 x i16>, <vscale x 1 x i16>* %x
+  %y = load <vscale x 1 x i16>, ptr %x
   %z = zext <vscale x 1 x i16> %y to <vscale x 1 x i32>
   ret <vscale x 1 x i32> %z
 }
 
-define <vscale x 1 x i64> @sextload_nxv1i16_nxv1i64(<vscale x 1 x i16>* %x) {
+define <vscale x 1 x i64> @sextload_nxv1i16_nxv1i64(ptr %x) {
 ; CHECK-LABEL: sextload_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vle16.v v9, (a0)
 ; CHECK-NEXT:    vsext.vf4 v8, v9
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 1 x i16>, <vscale x 1 x i16>* %x
+  %y = load <vscale x 1 x i16>, ptr %x
   %z = sext <vscale x 1 x i16> %y to <vscale x 1 x i64>
   ret <vscale x 1 x i64> %z
 }
 
-define <vscale x 1 x i64> @zextload_nxv1i16_nxv1i64(<vscale x 1 x i16>* %x) {
+define <vscale x 1 x i64> @zextload_nxv1i16_nxv1i64(ptr %x) {
 ; CHECK-LABEL: zextload_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vle16.v v9, (a0)
 ; CHECK-NEXT:    vzext.vf4 v8, v9
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 1 x i16>, <vscale x 1 x i16>* %x
+  %y = load <vscale x 1 x i16>, ptr %x
   %z = zext <vscale x 1 x i16> %y to <vscale x 1 x i64>
   ret <vscale x 1 x i64> %z
 }
 
-define void @truncstore_nxv2i16_nxv2i8(<vscale x 2 x i16> %x, <vscale x 2 x i8>* %z) {
+define void @truncstore_nxv2i16_nxv2i8(<vscale x 2 x i16> %x, ptr %z) {
 ; CHECK-LABEL: truncstore_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
@@ -456,59 +456,59 @@ define void @truncstore_nxv2i16_nxv2i8(<vscale x 2 x i16> %x, <vscale x 2 x i8>*
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 2 x i16> %x to <vscale x 2 x i8>
-  store <vscale x 2 x i8> %y, <vscale x 2 x i8>* %z
+  store <vscale x 2 x i8> %y, ptr %z
   ret void
 }
 
-define <vscale x 2 x i32> @sextload_nxv2i16_nxv2i32(<vscale x 2 x i16>* %x) {
+define <vscale x 2 x i32> @sextload_nxv2i16_nxv2i32(ptr %x) {
 ; CHECK-LABEL: sextload_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vle16.v v9, (a0)
 ; CHECK-NEXT:    vsext.vf2 v8, v9
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 2 x i16>, <vscale x 2 x i16>* %x
+  %y = load <vscale x 2 x i16>, ptr %x
   %z = sext <vscale x 2 x i16> %y to <vscale x 2 x i32>
   ret <vscale x 2 x i32> %z
 }
 
-define <vscale x 2 x i32> @zextload_nxv2i16_nxv2i32(<vscale x 2 x i16>* %x) {
+define <vscale x 2 x i32> @zextload_nxv2i16_nxv2i32(ptr %x) {
 ; CHECK-LABEL: zextload_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vle16.v v9, (a0)
 ; CHECK-NEXT:    vzext.vf2 v8, v9
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 2 x i16>, <vscale x 2 x i16>* %x
+  %y = load <vscale x 2 x i16>, ptr %x
   %z = zext <vscale x 2 x i16> %y to <vscale x 2 x i32>
   ret <vscale x 2 x i32> %z
 }
 
-define <vscale x 2 x i64> @sextload_nxv2i16_nxv2i64(<vscale x 2 x i16>* %x) {
+define <vscale x 2 x i64> @sextload_nxv2i16_nxv2i64(ptr %x) {
 ; CHECK-LABEL: sextload_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vle16.v v10, (a0)
 ; CHECK-NEXT:    vsext.vf4 v8, v10
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 2 x i16>, <vscale x 2 x i16>* %x
+  %y = load <vscale x 2 x i16>, ptr %x
   %z = sext <vscale x 2 x i16> %y to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %z
 }
 
-define <vscale x 2 x i64> @zextload_nxv2i16_nxv2i64(<vscale x 2 x i16>* %x) {
+define <vscale x 2 x i64> @zextload_nxv2i16_nxv2i64(ptr %x) {
 ; CHECK-LABEL: zextload_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vle16.v v10, (a0)
 ; CHECK-NEXT:    vzext.vf4 v8, v10
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 2 x i16>, <vscale x 2 x i16>* %x
+  %y = load <vscale x 2 x i16>, ptr %x
   %z = zext <vscale x 2 x i16> %y to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %z
 }
 
-define void @truncstore_nxv4i16_nxv4i8(<vscale x 4 x i16> %x, <vscale x 4 x i8>* %z) {
+define void @truncstore_nxv4i16_nxv4i8(<vscale x 4 x i16> %x, ptr %z) {
 ; CHECK-LABEL: truncstore_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
@@ -516,59 +516,59 @@ define void @truncstore_nxv4i16_nxv4i8(<vscale x 4 x i16> %x, <vscale x 4 x i8>*
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 4 x i16> %x to <vscale x 4 x i8>
-  store <vscale x 4 x i8> %y, <vscale x 4 x i8>* %z
+  store <vscale x 4 x i8> %y, ptr %z
   ret void
 }
 
-define <vscale x 4 x i32> @sextload_nxv4i16_nxv4i32(<vscale x 4 x i16>* %x) {
+define <vscale x 4 x i32> @sextload_nxv4i16_nxv4i32(ptr %x) {
 ; CHECK-LABEL: sextload_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl1re16.v v10, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vsext.vf2 v8, v10
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 4 x i16>, <vscale x 4 x i16>* %x
+  %y = load <vscale x 4 x i16>, ptr %x
   %z = sext <vscale x 4 x i16> %y to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %z
 }
 
-define <vscale x 4 x i32> @zextload_nxv4i16_nxv4i32(<vscale x 4 x i16>* %x) {
+define <vscale x 4 x i32> @zextload_nxv4i16_nxv4i32(ptr %x) {
 ; CHECK-LABEL: zextload_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl1re16.v v10, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v8, v10
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 4 x i16>, <vscale x 4 x i16>* %x
+  %y = load <vscale x 4 x i16>, ptr %x
   %z = zext <vscale x 4 x i16> %y to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %z
 }
 
-define <vscale x 4 x i64> @sextload_nxv4i16_nxv4i64(<vscale x 4 x i16>* %x) {
+define <vscale x 4 x i64> @sextload_nxv4i16_nxv4i64(ptr %x) {
 ; CHECK-LABEL: sextload_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl1re16.v v12, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vsext.vf4 v8, v12
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 4 x i16>, <vscale x 4 x i16>* %x
+  %y = load <vscale x 4 x i16>, ptr %x
   %z = sext <vscale x 4 x i16> %y to <vscale x 4 x i64>
   ret <vscale x 4 x i64> %z
 }
 
-define <vscale x 4 x i64> @zextload_nxv4i16_nxv4i64(<vscale x 4 x i16>* %x) {
+define <vscale x 4 x i64> @zextload_nxv4i16_nxv4i64(ptr %x) {
 ; CHECK-LABEL: zextload_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl1re16.v v12, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vzext.vf4 v8, v12
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 4 x i16>, <vscale x 4 x i16>* %x
+  %y = load <vscale x 4 x i16>, ptr %x
   %z = zext <vscale x 4 x i16> %y to <vscale x 4 x i64>
   ret <vscale x 4 x i64> %z
 }
 
-define void @truncstore_nxv8i16_nxv8i8(<vscale x 8 x i16> %x, <vscale x 8 x i8>* %z) {
+define void @truncstore_nxv8i16_nxv8i8(<vscale x 8 x i16> %x, ptr %z) {
 ; CHECK-LABEL: truncstore_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
@@ -576,59 +576,59 @@ define void @truncstore_nxv8i16_nxv8i8(<vscale x 8 x i16> %x, <vscale x 8 x i8>*
 ; CHECK-NEXT:    vs1r.v v10, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 8 x i16> %x to <vscale x 8 x i8>
-  store <vscale x 8 x i8> %y, <vscale x 8 x i8>* %z
+  store <vscale x 8 x i8> %y, ptr %z
   ret void
 }
 
-define <vscale x 8 x i32> @sextload_nxv8i16_nxv8i32(<vscale x 8 x i16>* %x) {
+define <vscale x 8 x i32> @sextload_nxv8i16_nxv8i32(ptr %x) {
 ; CHECK-LABEL: sextload_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl2re16.v v12, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vsext.vf2 v8, v12
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 8 x i16>, <vscale x 8 x i16>* %x
+  %y = load <vscale x 8 x i16>, ptr %x
   %z = sext <vscale x 8 x i16> %y to <vscale x 8 x i32>
   ret <vscale x 8 x i32> %z
 }
 
-define <vscale x 8 x i32> @zextload_nxv8i16_nxv8i32(<vscale x 8 x i16>* %x) {
+define <vscale x 8 x i32> @zextload_nxv8i16_nxv8i32(ptr %x) {
 ; CHECK-LABEL: zextload_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl2re16.v v12, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v8, v12
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 8 x i16>, <vscale x 8 x i16>* %x
+  %y = load <vscale x 8 x i16>, ptr %x
   %z = zext <vscale x 8 x i16> %y to <vscale x 8 x i32>
   ret <vscale x 8 x i32> %z
 }
 
-define <vscale x 8 x i64> @sextload_nxv8i16_nxv8i64(<vscale x 8 x i16>* %x) {
+define <vscale x 8 x i64> @sextload_nxv8i16_nxv8i64(ptr %x) {
 ; CHECK-LABEL: sextload_nxv8i16_nxv8i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl2re16.v v16, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vsext.vf4 v8, v16
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 8 x i16>, <vscale x 8 x i16>* %x
+  %y = load <vscale x 8 x i16>, ptr %x
   %z = sext <vscale x 8 x i16> %y to <vscale x 8 x i64>
   ret <vscale x 8 x i64> %z
 }
 
-define <vscale x 8 x i64> @zextload_nxv8i16_nxv8i64(<vscale x 8 x i16>* %x) {
+define <vscale x 8 x i64> @zextload_nxv8i16_nxv8i64(ptr %x) {
 ; CHECK-LABEL: zextload_nxv8i16_nxv8i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl2re16.v v16, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vzext.vf4 v8, v16
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 8 x i16>, <vscale x 8 x i16>* %x
+  %y = load <vscale x 8 x i16>, ptr %x
   %z = zext <vscale x 8 x i16> %y to <vscale x 8 x i64>
   ret <vscale x 8 x i64> %z
 }
 
-define void @truncstore_nxv16i16_nxv16i8(<vscale x 16 x i16> %x, <vscale x 16 x i8>* %z) {
+define void @truncstore_nxv16i16_nxv16i8(<vscale x 16 x i16> %x, ptr %z) {
 ; CHECK-LABEL: truncstore_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
@@ -636,35 +636,35 @@ define void @truncstore_nxv16i16_nxv16i8(<vscale x 16 x i16> %x, <vscale x 16 x
 ; CHECK-NEXT:    vs2r.v v12, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 16 x i16> %x to <vscale x 16 x i8>
-  store <vscale x 16 x i8> %y, <vscale x 16 x i8>* %z
+  store <vscale x 16 x i8> %y, ptr %z
   ret void
 }
 
-define <vscale x 16 x i32> @sextload_nxv16i16_nxv16i32(<vscale x 16 x i16>* %x) {
+define <vscale x 16 x i32> @sextload_nxv16i16_nxv16i32(ptr %x) {
 ; CHECK-LABEL: sextload_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl4re16.v v16, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vsext.vf2 v8, v16
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 16 x i16>, <vscale x 16 x i16>* %x
+  %y = load <vscale x 16 x i16>, ptr %x
   %z = sext <vscale x 16 x i16> %y to <vscale x 16 x i32>
   ret <vscale x 16 x i32> %z
 }
 
-define <vscale x 16 x i32> @zextload_nxv16i16_nxv16i32(<vscale x 16 x i16>* %x) {
+define <vscale x 16 x i32> @zextload_nxv16i16_nxv16i32(ptr %x) {
 ; CHECK-LABEL: zextload_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl4re16.v v16, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v8, v16
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 16 x i16>, <vscale x 16 x i16>* %x
+  %y = load <vscale x 16 x i16>, ptr %x
   %z = zext <vscale x 16 x i16> %y to <vscale x 16 x i32>
   ret <vscale x 16 x i32> %z
 }
 
-define void @truncstore_nxv32i16_nxv32i8(<vscale x 32 x i16> %x, <vscale x 32 x i8>* %z) {
+define void @truncstore_nxv32i16_nxv32i8(<vscale x 32 x i16> %x, ptr %z) {
 ; CHECK-LABEL: truncstore_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
@@ -672,11 +672,11 @@ define void @truncstore_nxv32i16_nxv32i8(<vscale x 32 x i16> %x, <vscale x 32 x
 ; CHECK-NEXT:    vs4r.v v16, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 32 x i16> %x to <vscale x 32 x i8>
-  store <vscale x 32 x i8> %y, <vscale x 32 x i8>* %z
+  store <vscale x 32 x i8> %y, ptr %z
   ret void
 }
 
-define void @truncstore_nxv1i32_nxv1i8(<vscale x 1 x i32> %x, <vscale x 1 x i8>* %z) {
+define void @truncstore_nxv1i32_nxv1i8(<vscale x 1 x i32> %x, ptr %z) {
 ; CHECK-LABEL: truncstore_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
@@ -686,11 +686,11 @@ define void @truncstore_nxv1i32_nxv1i8(<vscale x 1 x i32> %x, <vscale x 1 x i8>*
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 1 x i32> %x to <vscale x 1 x i8>
-  store <vscale x 1 x i8> %y, <vscale x 1 x i8>* %z
+  store <vscale x 1 x i8> %y, ptr %z
   ret void
 }
 
-define void @truncstore_nxv1i32_nxv1i16(<vscale x 1 x i32> %x, <vscale x 1 x i16>* %z) {
+define void @truncstore_nxv1i32_nxv1i16(<vscale x 1 x i32> %x, ptr %z) {
 ; CHECK-LABEL: truncstore_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
@@ -698,35 +698,35 @@ define void @truncstore_nxv1i32_nxv1i16(<vscale x 1 x i32> %x, <vscale x 1 x i16
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 1 x i32> %x to <vscale x 1 x i16>
-  store <vscale x 1 x i16> %y, <vscale x 1 x i16>* %z
+  store <vscale x 1 x i16> %y, ptr %z
   ret void
 }
 
-define <vscale x 1 x i64> @sextload_nxv1i32_nxv1i64(<vscale x 1 x i32>* %x) {
+define <vscale x 1 x i64> @sextload_nxv1i32_nxv1i64(ptr %x) {
 ; CHECK-LABEL: sextload_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vle32.v v9, (a0)
 ; CHECK-NEXT:    vsext.vf2 v8, v9
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 1 x i32>, <vscale x 1 x i32>* %x
+  %y = load <vscale x 1 x i32>, ptr %x
   %z = sext <vscale x 1 x i32> %y to <vscale x 1 x i64>
   ret <vscale x 1 x i64> %z
 }
 
-define <vscale x 1 x i64> @zextload_nxv1i32_nxv1i64(<vscale x 1 x i32>* %x) {
+define <vscale x 1 x i64> @zextload_nxv1i32_nxv1i64(ptr %x) {
 ; CHECK-LABEL: zextload_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vle32.v v9, (a0)
 ; CHECK-NEXT:    vzext.vf2 v8, v9
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 1 x i32>, <vscale x 1 x i32>* %x
+  %y = load <vscale x 1 x i32>, ptr %x
   %z = zext <vscale x 1 x i32> %y to <vscale x 1 x i64>
   ret <vscale x 1 x i64> %z
 }
 
-define void @truncstore_nxv2i32_nxv2i8(<vscale x 2 x i32> %x, <vscale x 2 x i8>* %z) {
+define void @truncstore_nxv2i32_nxv2i8(<vscale x 2 x i32> %x, ptr %z) {
 ; CHECK-LABEL: truncstore_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
@@ -736,11 +736,11 @@ define void @truncstore_nxv2i32_nxv2i8(<vscale x 2 x i32> %x, <vscale x 2 x i8>*
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 2 x i32> %x to <vscale x 2 x i8>
-  store <vscale x 2 x i8> %y, <vscale x 2 x i8>* %z
+  store <vscale x 2 x i8> %y, ptr %z
   ret void
 }
 
-define void @truncstore_nxv2i32_nxv2i16(<vscale x 2 x i32> %x, <vscale x 2 x i16>* %z) {
+define void @truncstore_nxv2i32_nxv2i16(<vscale x 2 x i32> %x, ptr %z) {
 ; CHECK-LABEL: truncstore_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
@@ -748,35 +748,35 @@ define void @truncstore_nxv2i32_nxv2i16(<vscale x 2 x i32> %x, <vscale x 2 x i16
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 2 x i32> %x to <vscale x 2 x i16>
-  store <vscale x 2 x i16> %y, <vscale x 2 x i16>* %z
+  store <vscale x 2 x i16> %y, ptr %z
   ret void
 }
 
-define <vscale x 2 x i64> @sextload_nxv2i32_nxv2i64(<vscale x 2 x i32>* %x) {
+define <vscale x 2 x i64> @sextload_nxv2i32_nxv2i64(ptr %x) {
 ; CHECK-LABEL: sextload_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl1re32.v v10, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vsext.vf2 v8, v10
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 2 x i32>, <vscale x 2 x i32>* %x
+  %y = load <vscale x 2 x i32>, ptr %x
   %z = sext <vscale x 2 x i32> %y to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %z
 }
 
-define <vscale x 2 x i64> @zextload_nxv2i32_nxv2i64(<vscale x 2 x i32>* %x) {
+define <vscale x 2 x i64> @zextload_nxv2i32_nxv2i64(ptr %x) {
 ; CHECK-LABEL: zextload_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl1re32.v v10, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v8, v10
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 2 x i32>, <vscale x 2 x i32>* %x
+  %y = load <vscale x 2 x i32>, ptr %x
   %z = zext <vscale x 2 x i32> %y to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %z
 }
 
-define void @truncstore_nxv4i32_nxv4i8(<vscale x 4 x i32> %x, <vscale x 4 x i8>* %z) {
+define void @truncstore_nxv4i32_nxv4i8(<vscale x 4 x i32> %x, ptr %z) {
 ; CHECK-LABEL: truncstore_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
@@ -786,11 +786,11 @@ define void @truncstore_nxv4i32_nxv4i8(<vscale x 4 x i32> %x, <vscale x 4 x i8>*
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 4 x i32> %x to <vscale x 4 x i8>
-  store <vscale x 4 x i8> %y, <vscale x 4 x i8>* %z
+  store <vscale x 4 x i8> %y, ptr %z
   ret void
 }
 
-define void @truncstore_nxv4i32_nxv4i16(<vscale x 4 x i32> %x, <vscale x 4 x i16>* %z) {
+define void @truncstore_nxv4i32_nxv4i16(<vscale x 4 x i32> %x, ptr %z) {
 ; CHECK-LABEL: truncstore_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
@@ -798,35 +798,35 @@ define void @truncstore_nxv4i32_nxv4i16(<vscale x 4 x i32> %x, <vscale x 4 x i16
 ; CHECK-NEXT:    vs1r.v v10, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 4 x i32> %x to <vscale x 4 x i16>
-  store <vscale x 4 x i16> %y, <vscale x 4 x i16>* %z
+  store <vscale x 4 x i16> %y, ptr %z
   ret void
 }
 
-define <vscale x 4 x i64> @sextload_nxv4i32_nxv4i64(<vscale x 4 x i32>* %x) {
+define <vscale x 4 x i64> @sextload_nxv4i32_nxv4i64(ptr %x) {
 ; CHECK-LABEL: sextload_nxv4i32_nxv4i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl2re32.v v12, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vsext.vf2 v8, v12
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 4 x i32>, <vscale x 4 x i32>* %x
+  %y = load <vscale x 4 x i32>, ptr %x
   %z = sext <vscale x 4 x i32> %y to <vscale x 4 x i64>
   ret <vscale x 4 x i64> %z
 }
 
-define <vscale x 4 x i64> @zextload_nxv4i32_nxv4i64(<vscale x 4 x i32>* %x) {
+define <vscale x 4 x i64> @zextload_nxv4i32_nxv4i64(ptr %x) {
 ; CHECK-LABEL: zextload_nxv4i32_nxv4i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl2re32.v v12, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v8, v12
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 4 x i32>, <vscale x 4 x i32>* %x
+  %y = load <vscale x 4 x i32>, ptr %x
   %z = zext <vscale x 4 x i32> %y to <vscale x 4 x i64>
   ret <vscale x 4 x i64> %z
 }
 
-define void @truncstore_nxv8i32_nxv8i8(<vscale x 8 x i32> %x, <vscale x 8 x i8>* %z) {
+define void @truncstore_nxv8i32_nxv8i8(<vscale x 8 x i32> %x, ptr %z) {
 ; CHECK-LABEL: truncstore_nxv8i32_nxv8i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
@@ -836,11 +836,11 @@ define void @truncstore_nxv8i32_nxv8i8(<vscale x 8 x i32> %x, <vscale x 8 x i8>*
 ; CHECK-NEXT:    vs1r.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 8 x i32> %x to <vscale x 8 x i8>
-  store <vscale x 8 x i8> %y, <vscale x 8 x i8>* %z
+  store <vscale x 8 x i8> %y, ptr %z
   ret void
 }
 
-define void @truncstore_nxv8i32_nxv8i16(<vscale x 8 x i32> %x, <vscale x 8 x i16>* %z) {
+define void @truncstore_nxv8i32_nxv8i16(<vscale x 8 x i32> %x, ptr %z) {
 ; CHECK-LABEL: truncstore_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
@@ -848,35 +848,35 @@ define void @truncstore_nxv8i32_nxv8i16(<vscale x 8 x i32> %x, <vscale x 8 x i16
 ; CHECK-NEXT:    vs2r.v v12, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 8 x i32> %x to <vscale x 8 x i16>
-  store <vscale x 8 x i16> %y, <vscale x 8 x i16>* %z
+  store <vscale x 8 x i16> %y, ptr %z
   ret void
 }
 
-define <vscale x 8 x i64> @sextload_nxv8i32_nxv8i64(<vscale x 8 x i32>* %x) {
+define <vscale x 8 x i64> @sextload_nxv8i32_nxv8i64(ptr %x) {
 ; CHECK-LABEL: sextload_nxv8i32_nxv8i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl4re32.v v16, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vsext.vf2 v8, v16
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 8 x i32>, <vscale x 8 x i32>* %x
+  %y = load <vscale x 8 x i32>, ptr %x
   %z = sext <vscale x 8 x i32> %y to <vscale x 8 x i64>
   ret <vscale x 8 x i64> %z
 }
 
-define <vscale x 8 x i64> @zextload_nxv8i32_nxv8i64(<vscale x 8 x i32>* %x) {
+define <vscale x 8 x i64> @zextload_nxv8i32_nxv8i64(ptr %x) {
 ; CHECK-LABEL: zextload_nxv8i32_nxv8i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl4re32.v v16, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v8, v16
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 8 x i32>, <vscale x 8 x i32>* %x
+  %y = load <vscale x 8 x i32>, ptr %x
   %z = zext <vscale x 8 x i32> %y to <vscale x 8 x i64>
   ret <vscale x 8 x i64> %z
 }
 
-define void @truncstore_nxv16i32_nxv16i8(<vscale x 16 x i32> %x, <vscale x 16 x i8>* %z) {
+define void @truncstore_nxv16i32_nxv16i8(<vscale x 16 x i32> %x, ptr %z) {
 ; CHECK-LABEL: truncstore_nxv16i32_nxv16i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
@@ -886,11 +886,11 @@ define void @truncstore_nxv16i32_nxv16i8(<vscale x 16 x i32> %x, <vscale x 16 x
 ; CHECK-NEXT:    vs2r.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 16 x i32> %x to <vscale x 16 x i8>
-  store <vscale x 16 x i8> %y, <vscale x 16 x i8>* %z
+  store <vscale x 16 x i8> %y, ptr %z
   ret void
 }
 
-define void @truncstore_nxv16i32_nxv16i16(<vscale x 16 x i32> %x, <vscale x 16 x i16>* %z) {
+define void @truncstore_nxv16i32_nxv16i16(<vscale x 16 x i32> %x, ptr %z) {
 ; CHECK-LABEL: truncstore_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
@@ -898,11 +898,11 @@ define void @truncstore_nxv16i32_nxv16i16(<vscale x 16 x i32> %x, <vscale x 16 x
 ; CHECK-NEXT:    vs4r.v v16, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 16 x i32> %x to <vscale x 16 x i16>
-  store <vscale x 16 x i16> %y, <vscale x 16 x i16>* %z
+  store <vscale x 16 x i16> %y, ptr %z
   ret void
 }
 
-define void @truncstore_nxv1i64_nxv1i8(<vscale x 1 x i64> %x, <vscale x 1 x i8>* %z) {
+define void @truncstore_nxv1i64_nxv1i8(<vscale x 1 x i64> %x, ptr %z) {
 ; CHECK-LABEL: truncstore_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
@@ -914,11 +914,11 @@ define void @truncstore_nxv1i64_nxv1i8(<vscale x 1 x i64> %x, <vscale x 1 x i8>*
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 1 x i64> %x to <vscale x 1 x i8>
-  store <vscale x 1 x i8> %y, <vscale x 1 x i8>* %z
+  store <vscale x 1 x i8> %y, ptr %z
   ret void
 }
 
-define void @truncstore_nxv1i64_nxv1i16(<vscale x 1 x i64> %x, <vscale x 1 x i16>* %z) {
+define void @truncstore_nxv1i64_nxv1i16(<vscale x 1 x i64> %x, ptr %z) {
 ; CHECK-LABEL: truncstore_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
@@ -928,11 +928,11 @@ define void @truncstore_nxv1i64_nxv1i16(<vscale x 1 x i64> %x, <vscale x 1 x i16
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 1 x i64> %x to <vscale x 1 x i16>
-  store <vscale x 1 x i16> %y, <vscale x 1 x i16>* %z
+  store <vscale x 1 x i16> %y, ptr %z
   ret void
 }
 
-define void @truncstore_nxv1i64_nxv1i32(<vscale x 1 x i64> %x, <vscale x 1 x i32>* %z) {
+define void @truncstore_nxv1i64_nxv1i32(<vscale x 1 x i64> %x, ptr %z) {
 ; CHECK-LABEL: truncstore_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
@@ -940,11 +940,11 @@ define void @truncstore_nxv1i64_nxv1i32(<vscale x 1 x i64> %x, <vscale x 1 x i32
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 1 x i64> %x to <vscale x 1 x i32>
-  store <vscale x 1 x i32> %y, <vscale x 1 x i32>* %z
+  store <vscale x 1 x i32> %y, ptr %z
   ret void
 }
 
-define void @truncstore_nxv2i64_nxv2i8(<vscale x 2 x i64> %x, <vscale x 2 x i8>* %z) {
+define void @truncstore_nxv2i64_nxv2i8(<vscale x 2 x i64> %x, ptr %z) {
 ; CHECK-LABEL: truncstore_nxv2i64_nxv2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
@@ -956,11 +956,11 @@ define void @truncstore_nxv2i64_nxv2i8(<vscale x 2 x i64> %x, <vscale x 2 x i8>*
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 2 x i64> %x to <vscale x 2 x i8>
-  store <vscale x 2 x i8> %y, <vscale x 2 x i8>* %z
+  store <vscale x 2 x i8> %y, ptr %z
   ret void
 }
 
-define void @truncstore_nxv2i64_nxv2i16(<vscale x 2 x i64> %x, <vscale x 2 x i16>* %z) {
+define void @truncstore_nxv2i64_nxv2i16(<vscale x 2 x i64> %x, ptr %z) {
 ; CHECK-LABEL: truncstore_nxv2i64_nxv2i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
@@ -970,11 +970,11 @@ define void @truncstore_nxv2i64_nxv2i16(<vscale x 2 x i64> %x, <vscale x 2 x i16
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 2 x i64> %x to <vscale x 2 x i16>
-  store <vscale x 2 x i16> %y, <vscale x 2 x i16>* %z
+  store <vscale x 2 x i16> %y, ptr %z
   ret void
 }
 
-define void @truncstore_nxv2i64_nxv2i32(<vscale x 2 x i64> %x, <vscale x 2 x i32>* %z) {
+define void @truncstore_nxv2i64_nxv2i32(<vscale x 2 x i64> %x, ptr %z) {
 ; CHECK-LABEL: truncstore_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
@@ -982,11 +982,11 @@ define void @truncstore_nxv2i64_nxv2i32(<vscale x 2 x i64> %x, <vscale x 2 x i32
 ; CHECK-NEXT:    vs1r.v v10, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 2 x i64> %x to <vscale x 2 x i32>
-  store <vscale x 2 x i32> %y, <vscale x 2 x i32>* %z
+  store <vscale x 2 x i32> %y, ptr %z
   ret void
 }
 
-define void @truncstore_nxv4i64_nxv4i8(<vscale x 4 x i64> %x, <vscale x 4 x i8>* %z) {
+define void @truncstore_nxv4i64_nxv4i8(<vscale x 4 x i64> %x, ptr %z) {
 ; CHECK-LABEL: truncstore_nxv4i64_nxv4i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
@@ -998,11 +998,11 @@ define void @truncstore_nxv4i64_nxv4i8(<vscale x 4 x i64> %x, <vscale x 4 x i8>*
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 4 x i64> %x to <vscale x 4 x i8>
-  store <vscale x 4 x i8> %y, <vscale x 4 x i8>* %z
+  store <vscale x 4 x i8> %y, ptr %z
   ret void
 }
 
-define void @truncstore_nxv4i64_nxv4i16(<vscale x 4 x i64> %x, <vscale x 4 x i16>* %z) {
+define void @truncstore_nxv4i64_nxv4i16(<vscale x 4 x i64> %x, ptr %z) {
 ; CHECK-LABEL: truncstore_nxv4i64_nxv4i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
@@ -1012,11 +1012,11 @@ define void @truncstore_nxv4i64_nxv4i16(<vscale x 4 x i64> %x, <vscale x 4 x i16
 ; CHECK-NEXT:    vs1r.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 4 x i64> %x to <vscale x 4 x i16>
-  store <vscale x 4 x i16> %y, <vscale x 4 x i16>* %z
+  store <vscale x 4 x i16> %y, ptr %z
   ret void
 }
 
-define void @truncstore_nxv4i64_nxv4i32(<vscale x 4 x i64> %x, <vscale x 4 x i32>* %z) {
+define void @truncstore_nxv4i64_nxv4i32(<vscale x 4 x i64> %x, ptr %z) {
 ; CHECK-LABEL: truncstore_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
@@ -1024,11 +1024,11 @@ define void @truncstore_nxv4i64_nxv4i32(<vscale x 4 x i64> %x, <vscale x 4 x i32
 ; CHECK-NEXT:    vs2r.v v12, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 4 x i64> %x to <vscale x 4 x i32>
-  store <vscale x 4 x i32> %y, <vscale x 4 x i32>* %z
+  store <vscale x 4 x i32> %y, ptr %z
   ret void
 }
 
-define void @truncstore_nxv8i64_nxv8i8(<vscale x 8 x i64> %x, <vscale x 8 x i8>* %z) {
+define void @truncstore_nxv8i64_nxv8i8(<vscale x 8 x i64> %x, ptr %z) {
 ; CHECK-LABEL: truncstore_nxv8i64_nxv8i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
@@ -1040,11 +1040,11 @@ define void @truncstore_nxv8i64_nxv8i8(<vscale x 8 x i64> %x, <vscale x 8 x i8>*
 ; CHECK-NEXT:    vs1r.v v10, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i8>
-  store <vscale x 8 x i8> %y, <vscale x 8 x i8>* %z
+  store <vscale x 8 x i8> %y, ptr %z
   ret void
 }
 
-define void @truncstore_nxv8i64_nxv8i16(<vscale x 8 x i64> %x, <vscale x 8 x i16>* %z) {
+define void @truncstore_nxv8i64_nxv8i16(<vscale x 8 x i64> %x, ptr %z) {
 ; CHECK-LABEL: truncstore_nxv8i64_nxv8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
@@ -1054,11 +1054,11 @@ define void @truncstore_nxv8i64_nxv8i16(<vscale x 8 x i64> %x, <vscale x 8 x i16
 ; CHECK-NEXT:    vs2r.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i16>
-  store <vscale x 8 x i16> %y, <vscale x 8 x i16>* %z
+  store <vscale x 8 x i16> %y, ptr %z
   ret void
 }
 
-define void @truncstore_nxv8i64_nxv8i32(<vscale x 8 x i64> %x, <vscale x 8 x i32>* %z) {
+define void @truncstore_nxv8i64_nxv8i32(<vscale x 8 x i64> %x, ptr %z) {
 ; CHECK-LABEL: truncstore_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
@@ -1066,23 +1066,23 @@ define void @truncstore_nxv8i64_nxv8i32(<vscale x 8 x i64> %x, <vscale x 8 x i32
 ; CHECK-NEXT:    vs4r.v v16, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i32>
-  store <vscale x 8 x i32> %y, <vscale x 8 x i32>* %z
+  store <vscale x 8 x i32> %y, ptr %z
   ret void
 }
 
-define <vscale x 1 x float> @extload_nxv1f16_nxv1f32(<vscale x 1 x half>* %x) {
+define <vscale x 1 x float> @extload_nxv1f16_nxv1f32(ptr %x) {
 ; CHECK-LABEL: extload_nxv1f16_nxv1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vle16.v v9, (a0)
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v9
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 1 x half>, <vscale x 1 x half>* %x
+  %y = load <vscale x 1 x half>, ptr %x
   %z = fpext <vscale x 1 x half> %y to <vscale x 1 x float>
   ret <vscale x 1 x float> %z
 }
 
-define <vscale x 1 x double> @extload_nxv1f16_nxv1f64(<vscale x 1 x half>* %x) {
+define <vscale x 1 x double> @extload_nxv1f16_nxv1f64(ptr %x) {
 ; CHECK-LABEL: extload_nxv1f16_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
@@ -1091,24 +1091,24 @@ define <vscale x 1 x double> @extload_nxv1f16_nxv1f64(<vscale x 1 x half>* %x) {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v9
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 1 x half>, <vscale x 1 x half>* %x
+  %y = load <vscale x 1 x half>, ptr %x
   %z = fpext <vscale x 1 x half> %y to <vscale x 1 x double>
   ret <vscale x 1 x double> %z
 }
 
-define <vscale x 2 x float> @extload_nxv2f16_nxv2f32(<vscale x 2 x half>* %x) {
+define <vscale x 2 x float> @extload_nxv2f16_nxv2f32(ptr %x) {
 ; CHECK-LABEL: extload_nxv2f16_nxv2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vle16.v v9, (a0)
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v9
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 2 x half>, <vscale x 2 x half>* %x
+  %y = load <vscale x 2 x half>, ptr %x
   %z = fpext <vscale x 2 x half> %y to <vscale x 2 x float>
   ret <vscale x 2 x float> %z
 }
 
-define <vscale x 2 x double> @extload_nxv2f16_nxv2f64(<vscale x 2 x half>* %x) {
+define <vscale x 2 x double> @extload_nxv2f16_nxv2f64(ptr %x) {
 ; CHECK-LABEL: extload_nxv2f16_nxv2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
@@ -1117,24 +1117,24 @@ define <vscale x 2 x double> @extload_nxv2f16_nxv2f64(<vscale x 2 x half>* %x) {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v10
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 2 x half>, <vscale x 2 x half>* %x
+  %y = load <vscale x 2 x half>, ptr %x
   %z = fpext <vscale x 2 x half> %y to <vscale x 2 x double>
   ret <vscale x 2 x double> %z
 }
 
-define <vscale x 4 x float> @extload_nxv4f16_nxv4f32(<vscale x 4 x half>* %x) {
+define <vscale x 4 x float> @extload_nxv4f16_nxv4f32(ptr %x) {
 ; CHECK-LABEL: extload_nxv4f16_nxv4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl1re16.v v10, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v10
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 4 x half>, <vscale x 4 x half>* %x
+  %y = load <vscale x 4 x half>, ptr %x
   %z = fpext <vscale x 4 x half> %y to <vscale x 4 x float>
   ret <vscale x 4 x float> %z
 }
 
-define <vscale x 4 x double> @extload_nxv4f16_nxv4f64(<vscale x 4 x half>* %x) {
+define <vscale x 4 x double> @extload_nxv4f16_nxv4f64(ptr %x) {
 ; CHECK-LABEL: extload_nxv4f16_nxv4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl1re16.v v8, (a0)
@@ -1143,24 +1143,24 @@ define <vscale x 4 x double> @extload_nxv4f16_nxv4f64(<vscale x 4 x half>* %x) {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v12
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 4 x half>, <vscale x 4 x half>* %x
+  %y = load <vscale x 4 x half>, ptr %x
   %z = fpext <vscale x 4 x half> %y to <vscale x 4 x double>
   ret <vscale x 4 x double> %z
 }
 
-define <vscale x 8 x float> @extload_nxv8f16_nxv8f32(<vscale x 8 x half>* %x) {
+define <vscale x 8 x float> @extload_nxv8f16_nxv8f32(ptr %x) {
 ; CHECK-LABEL: extload_nxv8f16_nxv8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl2re16.v v12, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v12
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 8 x half>, <vscale x 8 x half>* %x
+  %y = load <vscale x 8 x half>, ptr %x
   %z = fpext <vscale x 8 x half> %y to <vscale x 8 x float>
   ret <vscale x 8 x float> %z
 }
 
-define <vscale x 8 x double> @extload_nxv8f16_nxv8f64(<vscale x 8 x half>* %x) {
+define <vscale x 8 x double> @extload_nxv8f16_nxv8f64(ptr %x) {
 ; CHECK-LABEL: extload_nxv8f16_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl2re16.v v8, (a0)
@@ -1169,24 +1169,24 @@ define <vscale x 8 x double> @extload_nxv8f16_nxv8f64(<vscale x 8 x half>* %x) {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v16
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 8 x half>, <vscale x 8 x half>* %x
+  %y = load <vscale x 8 x half>, ptr %x
   %z = fpext <vscale x 8 x half> %y to <vscale x 8 x double>
   ret <vscale x 8 x double> %z
 }
 
-define <vscale x 16 x float> @extload_nxv16f16_nxv16f32(<vscale x 16 x half>* %x) {
+define <vscale x 16 x float> @extload_nxv16f16_nxv16f32(ptr %x) {
 ; CHECK-LABEL: extload_nxv16f16_nxv16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl4re16.v v16, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v16
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 16 x half>, <vscale x 16 x half>* %x
+  %y = load <vscale x 16 x half>, ptr %x
   %z = fpext <vscale x 16 x half> %y to <vscale x 16 x float>
   ret <vscale x 16 x float> %z
 }
 
-define void @truncstore_nxv1f32_nxv1f16(<vscale x 1 x float> %x, <vscale x 1 x half>* %z) {
+define void @truncstore_nxv1f32_nxv1f16(<vscale x 1 x float> %x, ptr %z) {
 ; CHECK-LABEL: truncstore_nxv1f32_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
@@ -1194,23 +1194,23 @@ define void @truncstore_nxv1f32_nxv1f16(<vscale x 1 x float> %x, <vscale x 1 x h
 ; CHECK-NEXT:    vse16.v v9, (a0)
 ; CHECK-NEXT:    ret
   %y = fptrunc <vscale x 1 x float> %x to <vscale x 1 x half>
-  store <vscale x 1 x half> %y, <vscale x 1 x half>* %z
+  store <vscale x 1 x half> %y, ptr %z
   ret void
 }
 
-define <vscale x 1 x double> @extload_nxv1f32_nxv1f64(<vscale x 1 x float>* %x) {
+define <vscale x 1 x double> @extload_nxv1f32_nxv1f64(ptr %x) {
 ; CHECK-LABEL: extload_nxv1f32_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vle32.v v9, (a0)
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v9
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 1 x float>, <vscale x 1 x float>* %x
+  %y = load <vscale x 1 x float>, ptr %x
   %z = fpext <vscale x 1 x float> %y to <vscale x 1 x double>
   ret <vscale x 1 x double> %z
 }
 
-define void @truncstore_nxv2f32_nxv2f16(<vscale x 2 x float> %x, <vscale x 2 x half>* %z) {
+define void @truncstore_nxv2f32_nxv2f16(<vscale x 2 x float> %x, ptr %z) {
 ; CHECK-LABEL: truncstore_nxv2f32_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
@@ -1218,23 +1218,23 @@ define void @truncstore_nxv2f32_nxv2f16(<vscale x 2 x float> %x, <vscale x 2 x h
 ; CHECK-NEXT:    vse16.v v9, (a0)
 ; CHECK-NEXT:    ret
   %y = fptrunc <vscale x 2 x float> %x to <vscale x 2 x half>
-  store <vscale x 2 x half> %y, <vscale x 2 x half>* %z
+  store <vscale x 2 x half> %y, ptr %z
   ret void
 }
 
-define <vscale x 2 x double> @extload_nxv2f32_nxv2f64(<vscale x 2 x float>* %x) {
+define <vscale x 2 x double> @extload_nxv2f32_nxv2f64(ptr %x) {
 ; CHECK-LABEL: extload_nxv2f32_nxv2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl1re32.v v10, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v10
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 2 x float>, <vscale x 2 x float>* %x
+  %y = load <vscale x 2 x float>, ptr %x
   %z = fpext <vscale x 2 x float> %y to <vscale x 2 x double>
   ret <vscale x 2 x double> %z
 }
 
-define void @truncstore_nxv4f32_nxv4f16(<vscale x 4 x float> %x, <vscale x 4 x half>* %z) {
+define void @truncstore_nxv4f32_nxv4f16(<vscale x 4 x float> %x, ptr %z) {
 ; CHECK-LABEL: truncstore_nxv4f32_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
@@ -1242,23 +1242,23 @@ define void @truncstore_nxv4f32_nxv4f16(<vscale x 4 x float> %x, <vscale x 4 x h
 ; CHECK-NEXT:    vs1r.v v10, (a0)
 ; CHECK-NEXT:    ret
   %y = fptrunc <vscale x 4 x float> %x to <vscale x 4 x half>
-  store <vscale x 4 x half> %y, <vscale x 4 x half>* %z
+  store <vscale x 4 x half> %y, ptr %z
   ret void
 }
 
-define <vscale x 4 x double> @extload_nxv4f32_nxv4f64(<vscale x 4 x float>* %x) {
+define <vscale x 4 x double> @extload_nxv4f32_nxv4f64(ptr %x) {
 ; CHECK-LABEL: extload_nxv4f32_nxv4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl2re32.v v12, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v12
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 4 x float>, <vscale x 4 x float>* %x
+  %y = load <vscale x 4 x float>, ptr %x
   %z = fpext <vscale x 4 x float> %y to <vscale x 4 x double>
   ret <vscale x 4 x double> %z
 }
 
-define void @truncstore_nxv8f32_nxv8f16(<vscale x 8 x float> %x, <vscale x 8 x half>* %z) {
+define void @truncstore_nxv8f32_nxv8f16(<vscale x 8 x float> %x, ptr %z) {
 ; CHECK-LABEL: truncstore_nxv8f32_nxv8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
@@ -1266,23 +1266,23 @@ define void @truncstore_nxv8f32_nxv8f16(<vscale x 8 x float> %x, <vscale x 8 x h
 ; CHECK-NEXT:    vs2r.v v12, (a0)
 ; CHECK-NEXT:    ret
   %y = fptrunc <vscale x 8 x float> %x to <vscale x 8 x half>
-  store <vscale x 8 x half> %y, <vscale x 8 x half>* %z
+  store <vscale x 8 x half> %y, ptr %z
   ret void
 }
 
-define <vscale x 8 x double> @extload_nxv8f32_nxv8f64(<vscale x 8 x float>* %x) {
+define <vscale x 8 x double> @extload_nxv8f32_nxv8f64(ptr %x) {
 ; CHECK-LABEL: extload_nxv8f32_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl4re32.v v16, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v16
 ; CHECK-NEXT:    ret
-  %y = load <vscale x 8 x float>, <vscale x 8 x float>* %x
+  %y = load <vscale x 8 x float>, ptr %x
   %z = fpext <vscale x 8 x float> %y to <vscale x 8 x double>
   ret <vscale x 8 x double> %z
 }
 
-define void @truncstore_nxv16f32_nxv16f16(<vscale x 16 x float> %x, <vscale x 16 x half>* %z) {
+define void @truncstore_nxv16f32_nxv16f16(<vscale x 16 x float> %x, ptr %z) {
 ; CHECK-LABEL: truncstore_nxv16f32_nxv16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
@@ -1290,11 +1290,11 @@ define void @truncstore_nxv16f32_nxv16f16(<vscale x 16 x float> %x, <vscale x 16
 ; CHECK-NEXT:    vs4r.v v16, (a0)
 ; CHECK-NEXT:    ret
   %y = fptrunc <vscale x 16 x float> %x to <vscale x 16 x half>
-  store <vscale x 16 x half> %y, <vscale x 16 x half>* %z
+  store <vscale x 16 x half> %y, ptr %z
   ret void
 }
 
-define void @truncstore_nxv1f64_nxv1f16(<vscale x 1 x double> %x, <vscale x 1 x half>* %z) {
+define void @truncstore_nxv1f64_nxv1f16(<vscale x 1 x double> %x, ptr %z) {
 ; CHECK-LABEL: truncstore_nxv1f64_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
@@ -1304,11 +1304,11 @@ define void @truncstore_nxv1f64_nxv1f16(<vscale x 1 x double> %x, <vscale x 1 x
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = fptrunc <vscale x 1 x double> %x to <vscale x 1 x half>
-  store <vscale x 1 x half> %y, <vscale x 1 x half>* %z
+  store <vscale x 1 x half> %y, ptr %z
   ret void
 }
 
-define void @truncstore_nxv1f64_nxv1f32(<vscale x 1 x double> %x, <vscale x 1 x float>* %z) {
+define void @truncstore_nxv1f64_nxv1f32(<vscale x 1 x double> %x, ptr %z) {
 ; CHECK-LABEL: truncstore_nxv1f64_nxv1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
@@ -1316,11 +1316,11 @@ define void @truncstore_nxv1f64_nxv1f32(<vscale x 1 x double> %x, <vscale x 1 x
 ; CHECK-NEXT:    vse32.v v9, (a0)
 ; CHECK-NEXT:    ret
   %y = fptrunc <vscale x 1 x double> %x to <vscale x 1 x float>
-  store <vscale x 1 x float> %y, <vscale x 1 x float>* %z
+  store <vscale x 1 x float> %y, ptr %z
   ret void
 }
 
-define void @truncstore_nxv2f64_nxv2f16(<vscale x 2 x double> %x, <vscale x 2 x half>* %z) {
+define void @truncstore_nxv2f64_nxv2f16(<vscale x 2 x double> %x, ptr %z) {
 ; CHECK-LABEL: truncstore_nxv2f64_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
@@ -1330,11 +1330,11 @@ define void @truncstore_nxv2f64_nxv2f16(<vscale x 2 x double> %x, <vscale x 2 x
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = fptrunc <vscale x 2 x double> %x to <vscale x 2 x half>
-  store <vscale x 2 x half> %y, <vscale x 2 x half>* %z
+  store <vscale x 2 x half> %y, ptr %z
   ret void
 }
 
-define void @truncstore_nxv2f64_nxv2f32(<vscale x 2 x double> %x, <vscale x 2 x float>* %z) {
+define void @truncstore_nxv2f64_nxv2f32(<vscale x 2 x double> %x, ptr %z) {
 ; CHECK-LABEL: truncstore_nxv2f64_nxv2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
@@ -1342,11 +1342,11 @@ define void @truncstore_nxv2f64_nxv2f32(<vscale x 2 x double> %x, <vscale x 2 x
 ; CHECK-NEXT:    vs1r.v v10, (a0)
 ; CHECK-NEXT:    ret
   %y = fptrunc <vscale x 2 x double> %x to <vscale x 2 x float>
-  store <vscale x 2 x float> %y, <vscale x 2 x float>* %z
+  store <vscale x 2 x float> %y, ptr %z
   ret void
 }
 
-define void @truncstore_nxv4f64_nxv4f16(<vscale x 4 x double> %x, <vscale x 4 x half>* %z) {
+define void @truncstore_nxv4f64_nxv4f16(<vscale x 4 x double> %x, ptr %z) {
 ; CHECK-LABEL: truncstore_nxv4f64_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
@@ -1356,11 +1356,11 @@ define void @truncstore_nxv4f64_nxv4f16(<vscale x 4 x double> %x, <vscale x 4 x
 ; CHECK-NEXT:    vs1r.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = fptrunc <vscale x 4 x double> %x to <vscale x 4 x half>
-  store <vscale x 4 x half> %y, <vscale x 4 x half>* %z
+  store <vscale x 4 x half> %y, ptr %z
   ret void
 }
 
-define void @truncstore_nxv4f64_nxv4f32(<vscale x 4 x double> %x, <vscale x 4 x float>* %z) {
+define void @truncstore_nxv4f64_nxv4f32(<vscale x 4 x double> %x, ptr %z) {
 ; CHECK-LABEL: truncstore_nxv4f64_nxv4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
@@ -1368,11 +1368,11 @@ define void @truncstore_nxv4f64_nxv4f32(<vscale x 4 x double> %x, <vscale x 4 x
 ; CHECK-NEXT:    vs2r.v v12, (a0)
 ; CHECK-NEXT:    ret
   %y = fptrunc <vscale x 4 x double> %x to <vscale x 4 x float>
-  store <vscale x 4 x float> %y, <vscale x 4 x float>* %z
+  store <vscale x 4 x float> %y, ptr %z
   ret void
 }
 
-define void @truncstore_nxv8f64_nxv8f16(<vscale x 8 x double> %x, <vscale x 8 x half>* %z) {
+define void @truncstore_nxv8f64_nxv8f16(<vscale x 8 x double> %x, ptr %z) {
 ; CHECK-LABEL: truncstore_nxv8f64_nxv8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
@@ -1382,11 +1382,11 @@ define void @truncstore_nxv8f64_nxv8f16(<vscale x 8 x double> %x, <vscale x 8 x
 ; CHECK-NEXT:    vs2r.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = fptrunc <vscale x 8 x double> %x to <vscale x 8 x half>
-  store <vscale x 8 x half> %y, <vscale x 8 x half>* %z
+  store <vscale x 8 x half> %y, ptr %z
   ret void
 }
 
-define void @truncstore_nxv8f64_nxv8f32(<vscale x 8 x double> %x, <vscale x 8 x float>* %z) {
+define void @truncstore_nxv8f64_nxv8f32(<vscale x 8 x double> %x, ptr %z) {
 ; CHECK-LABEL: truncstore_nxv8f64_nxv8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
@@ -1394,6 +1394,6 @@ define void @truncstore_nxv8f64_nxv8f32(<vscale x 8 x double> %x, <vscale x 8 x
 ; CHECK-NEXT:    vs4r.v v16, (a0)
 ; CHECK-NEXT:    ret
   %y = fptrunc <vscale x 8 x double> %x to <vscale x 8 x float>
-  store <vscale x 8 x float> %y, <vscale x 8 x float>* %z
+  store <vscale x 8 x float> %y, ptr %z
   ret void
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
index 36dfd631b7664..f1ac9aa53f579 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
@@ -484,7 +484,7 @@ define double @extractelt_nxv8f64_idx(<vscale x 8 x double> %v, i32 zeroext %idx
   ret double %r
 }
 
-define void @store_extractelt_nxv8f64(<vscale x 8 x double>* %x, double* %p) {
+define void @store_extractelt_nxv8f64(ptr %x, ptr %p) {
 ; CHECK-LABEL: store_extractelt_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl8re64.v v8, (a0)
@@ -492,22 +492,22 @@ define void @store_extractelt_nxv8f64(<vscale x 8 x double>* %x, double* %p) {
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 1
 ; CHECK-NEXT:    vse64.v v8, (a1)
 ; CHECK-NEXT:    ret
-  %a = load <vscale x 8 x double>, <vscale x 8 x double>* %x
+  %a = load <vscale x 8 x double>, ptr %x
   %b = extractelement <vscale x 8 x double> %a, i64 1
-  store double %b, double* %p
+  store double %b, ptr %p
   ret void
 }
 
-define void @store_vfmv_f_s_nxv8f64(<vscale x 8 x double>* %x, double* %p) {
+define void @store_vfmv_f_s_nxv8f64(ptr %x, ptr %p) {
 ; CHECK-LABEL: store_vfmv_f_s_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl8re64.v v8, (a0)
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-NEXT:    vse64.v v8, (a1)
 ; CHECK-NEXT:    ret
-  %a = load <vscale x 8 x double>, <vscale x 8 x double>* %x
+  %a = load <vscale x 8 x double>, ptr %x
   %b = call double @llvm.riscv.vfmv.f.s.nxv8f64(<vscale x 8 x double> %a)
-  store double %b, double* %p
+  store double %b, ptr %p
   ret void
 }
 

diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
index ba8486780197e..e69b4789a09af 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
 
-define i1 @extractelt_nxv1i1(<vscale x 1 x i8>* %x, i64 %idx) nounwind {
+define i1 @extractelt_nxv1i1(ptr %x, i64 %idx) nounwind {
 ; CHECK-LABEL: extractelt_nxv1i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a2, zero, e8, mf8, ta, ma
@@ -13,13 +13,13 @@ define i1 @extractelt_nxv1i1(<vscale x 1 x i8>* %x, i64 %idx) nounwind {
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a1
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
-  %a = load <vscale x 1 x i8>, <vscale x 1 x i8>* %x
+  %a = load <vscale x 1 x i8>, ptr %x
   %b = icmp eq <vscale x 1 x i8> %a, zeroinitializer
   %c = extractelement <vscale x 1 x i1> %b, i64 %idx
   ret i1 %c
 }
 
-define i1 @extractelt_nxv2i1(<vscale x 2 x i8>* %x, i64 %idx) nounwind {
+define i1 @extractelt_nxv2i1(ptr %x, i64 %idx) nounwind {
 ; CHECK-LABEL: extractelt_nxv2i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a2, zero, e8, mf4, ta, ma
@@ -30,13 +30,13 @@ define i1 @extractelt_nxv2i1(<vscale x 2 x i8>* %x, i64 %idx) nounwind {
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a1
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
-  %a = load <vscale x 2 x i8>, <vscale x 2 x i8>* %x
+  %a = load <vscale x 2 x i8>, ptr %x
   %b = icmp eq <vscale x 2 x i8> %a, zeroinitializer
   %c = extractelement <vscale x 2 x i1> %b, i64 %idx
   ret i1 %c
 }
 
-define i1 @extractelt_nxv4i1(<vscale x 4 x i8>* %x, i64 %idx) nounwind {
+define i1 @extractelt_nxv4i1(ptr %x, i64 %idx) nounwind {
 ; CHECK-LABEL: extractelt_nxv4i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a2, zero, e8, mf2, ta, ma
@@ -47,13 +47,13 @@ define i1 @extractelt_nxv4i1(<vscale x 4 x i8>* %x, i64 %idx) nounwind {
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a1
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
-  %a = load <vscale x 4 x i8>, <vscale x 4 x i8>* %x
+  %a = load <vscale x 4 x i8>, ptr %x
   %b = icmp eq <vscale x 4 x i8> %a, zeroinitializer
   %c = extractelement <vscale x 4 x i1> %b, i64 %idx
   ret i1 %c
 }
 
-define i1 @extractelt_nxv8i1(<vscale x 8 x i8>* %x, i64 %idx) nounwind {
+define i1 @extractelt_nxv8i1(ptr %x, i64 %idx) nounwind {
 ; CHECK-LABEL: extractelt_nxv8i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl1r.v v8, (a0)
@@ -64,13 +64,13 @@ define i1 @extractelt_nxv8i1(<vscale x 8 x i8>* %x, i64 %idx) nounwind {
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a1
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
-  %a = load <vscale x 8 x i8>, <vscale x 8 x i8>* %x
+  %a = load <vscale x 8 x i8>, ptr %x
   %b = icmp eq <vscale x 8 x i8> %a, zeroinitializer
   %c = extractelement <vscale x 8 x i1> %b, i64 %idx
   ret i1 %c
 }
 
-define i1 @extractelt_nxv16i1(<vscale x 16 x i8>* %x, i64 %idx) nounwind {
+define i1 @extractelt_nxv16i1(ptr %x, i64 %idx) nounwind {
 ; CHECK-LABEL: extractelt_nxv16i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl2r.v v8, (a0)
@@ -82,13 +82,13 @@ define i1 @extractelt_nxv16i1(<vscale x 16 x i8>* %x, i64 %idx) nounwind {
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a1
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
-  %a = load <vscale x 16 x i8>, <vscale x 16 x i8>* %x
+  %a = load <vscale x 16 x i8>, ptr %x
   %b = icmp eq <vscale x 16 x i8> %a, zeroinitializer
   %c = extractelement <vscale x 16 x i1> %b, i64 %idx
   ret i1 %c
 }
 
-define i1 @extractelt_nxv32i1(<vscale x 32 x i8>* %x, i64 %idx) nounwind {
+define i1 @extractelt_nxv32i1(ptr %x, i64 %idx) nounwind {
 ; CHECK-LABEL: extractelt_nxv32i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl4r.v v8, (a0)
@@ -100,13 +100,13 @@ define i1 @extractelt_nxv32i1(<vscale x 32 x i8>* %x, i64 %idx) nounwind {
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a1
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
-  %a = load <vscale x 32 x i8>, <vscale x 32 x i8>* %x
+  %a = load <vscale x 32 x i8>, ptr %x
   %b = icmp eq <vscale x 32 x i8> %a, zeroinitializer
   %c = extractelement <vscale x 32 x i1> %b, i64 %idx
   ret i1 %c
 }
 
-define i1 @extractelt_nxv64i1(<vscale x 64 x i8>* %x, i64 %idx) nounwind {
+define i1 @extractelt_nxv64i1(ptr %x, i64 %idx) nounwind {
 ; CHECK-LABEL: extractelt_nxv64i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl8r.v v8, (a0)
@@ -118,13 +118,13 @@ define i1 @extractelt_nxv64i1(<vscale x 64 x i8>* %x, i64 %idx) nounwind {
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a1
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
-  %a = load <vscale x 64 x i8>, <vscale x 64 x i8>* %x
+  %a = load <vscale x 64 x i8>, ptr %x
   %b = icmp eq <vscale x 64 x i8> %a, zeroinitializer
   %c = extractelement <vscale x 64 x i1> %b, i64 %idx
   ret i1 %c
 }
 
-define i1 @extractelt_nxv128i1(<vscale x 128 x i8>* %x, i64 %idx) nounwind {
+define i1 @extractelt_nxv128i1(ptr %x, i64 %idx) nounwind {
 ; RV32-LABEL: extractelt_nxv128i1:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    csrr a2, vlenb
@@ -204,13 +204,13 @@ define i1 @extractelt_nxv128i1(<vscale x 128 x i8>* %x, i64 %idx) nounwind {
 ; RV64-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    addi sp, sp, 80
 ; RV64-NEXT:    ret
-  %a = load <vscale x 128 x i8>, <vscale x 128 x i8>* %x
+  %a = load <vscale x 128 x i8>, ptr %x
   %b = icmp eq <vscale x 128 x i8> %a, zeroinitializer
   %c = extractelement <vscale x 128 x i1> %b, i64 %idx
   ret i1 %c
 }
 
-define i1 @extractelt_nxv1i1_idx0(<vscale x 1 x i8>* %x) nounwind {
+define i1 @extractelt_nxv1i1_idx0(ptr %x) nounwind {
 ; CHECK-LABEL: extractelt_nxv1i1_idx0:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
@@ -219,13 +219,13 @@ define i1 @extractelt_nxv1i1_idx0(<vscale x 1 x i8>* %x) nounwind {
 ; CHECK-NEXT:    vfirst.m a0, v8
 ; CHECK-NEXT:    seqz a0, a0
 ; CHECK-NEXT:    ret
-  %a = load <vscale x 1 x i8>, <vscale x 1 x i8>* %x
+  %a = load <vscale x 1 x i8>, ptr %x
   %b = icmp eq <vscale x 1 x i8> %a, zeroinitializer
   %c = extractelement <vscale x 1 x i1> %b, i64 0
   ret i1 %c
 }
 
-define i1 @extractelt_nxv2i1_idx0(<vscale x 2 x i8>* %x) nounwind {
+define i1 @extractelt_nxv2i1_idx0(ptr %x) nounwind {
 ; CHECK-LABEL: extractelt_nxv2i1_idx0:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
@@ -234,13 +234,13 @@ define i1 @extractelt_nxv2i1_idx0(<vscale x 2 x i8>* %x) nounwind {
 ; CHECK-NEXT:    vfirst.m a0, v8
 ; CHECK-NEXT:    seqz a0, a0
 ; CHECK-NEXT:    ret
-  %a = load <vscale x 2 x i8>, <vscale x 2 x i8>* %x
+  %a = load <vscale x 2 x i8>, ptr %x
   %b = icmp eq <vscale x 2 x i8> %a, zeroinitializer
   %c = extractelement <vscale x 2 x i1> %b, i64 0
   ret i1 %c
 }
 
-define i1 @extractelt_nxv4i1_idx0(<vscale x 4 x i8>* %x) nounwind {
+define i1 @extractelt_nxv4i1_idx0(ptr %x) nounwind {
 ; CHECK-LABEL: extractelt_nxv4i1_idx0:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
@@ -249,13 +249,13 @@ define i1 @extractelt_nxv4i1_idx0(<vscale x 4 x i8>* %x) nounwind {
 ; CHECK-NEXT:    vfirst.m a0, v8
 ; CHECK-NEXT:    seqz a0, a0
 ; CHECK-NEXT:    ret
-  %a = load <vscale x 4 x i8>, <vscale x 4 x i8>* %x
+  %a = load <vscale x 4 x i8>, ptr %x
   %b = icmp eq <vscale x 4 x i8> %a, zeroinitializer
   %c = extractelement <vscale x 4 x i1> %b, i64 0
   ret i1 %c
 }
 
-define i1 @extractelt_nxv8i1_idx0(<vscale x 8 x i8>* %x) nounwind {
+define i1 @extractelt_nxv8i1_idx0(ptr %x) nounwind {
 ; CHECK-LABEL: extractelt_nxv8i1_idx0:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl1r.v v8, (a0)
@@ -264,13 +264,13 @@ define i1 @extractelt_nxv8i1_idx0(<vscale x 8 x i8>* %x) nounwind {
 ; CHECK-NEXT:    vfirst.m a0, v8
 ; CHECK-NEXT:    seqz a0, a0
 ; CHECK-NEXT:    ret
-  %a = load <vscale x 8 x i8>, <vscale x 8 x i8>* %x
+  %a = load <vscale x 8 x i8>, ptr %x
   %b = icmp eq <vscale x 8 x i8> %a, zeroinitializer
   %c = extractelement <vscale x 8 x i1> %b, i64 0
   ret i1 %c
 }
 
-define i1 @extractelt_nxv16i1_idx0(<vscale x 16 x i8>* %x) nounwind {
+define i1 @extractelt_nxv16i1_idx0(ptr %x) nounwind {
 ; CHECK-LABEL: extractelt_nxv16i1_idx0:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl2r.v v8, (a0)
@@ -279,13 +279,13 @@ define i1 @extractelt_nxv16i1_idx0(<vscale x 16 x i8>* %x) nounwind {
 ; CHECK-NEXT:    vfirst.m a0, v10
 ; CHECK-NEXT:    seqz a0, a0
 ; CHECK-NEXT:    ret
-  %a = load <vscale x 16 x i8>, <vscale x 16 x i8>* %x
+  %a = load <vscale x 16 x i8>, ptr %x
   %b = icmp eq <vscale x 16 x i8> %a, zeroinitializer
   %c = extractelement <vscale x 16 x i1> %b, i64 0
   ret i1 %c
 }
 
-define i1 @extractelt_nxv32i1_idx0(<vscale x 32 x i8>* %x) nounwind {
+define i1 @extractelt_nxv32i1_idx0(ptr %x) nounwind {
 ; CHECK-LABEL: extractelt_nxv32i1_idx0:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl4r.v v8, (a0)
@@ -294,13 +294,13 @@ define i1 @extractelt_nxv32i1_idx0(<vscale x 32 x i8>* %x) nounwind {
 ; CHECK-NEXT:    vfirst.m a0, v12
 ; CHECK-NEXT:    seqz a0, a0
 ; CHECK-NEXT:    ret
-  %a = load <vscale x 32 x i8>, <vscale x 32 x i8>* %x
+  %a = load <vscale x 32 x i8>, ptr %x
   %b = icmp eq <vscale x 32 x i8> %a, zeroinitializer
   %c = extractelement <vscale x 32 x i1> %b, i64 0
   ret i1 %c
 }
 
-define i1 @extractelt_nxv64i1_idx0(<vscale x 64 x i8>* %x) nounwind {
+define i1 @extractelt_nxv64i1_idx0(ptr %x) nounwind {
 ; CHECK-LABEL: extractelt_nxv64i1_idx0:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl8r.v v8, (a0)
@@ -309,7 +309,7 @@ define i1 @extractelt_nxv64i1_idx0(<vscale x 64 x i8>* %x) nounwind {
 ; CHECK-NEXT:    vfirst.m a0, v16
 ; CHECK-NEXT:    seqz a0, a0
 ; CHECK-NEXT:    ret
-  %a = load <vscale x 64 x i8>, <vscale x 64 x i8>* %x
+  %a = load <vscale x 64 x i8>, ptr %x
   %b = icmp eq <vscale x 64 x i8> %a, zeroinitializer
   %c = extractelement <vscale x 64 x i1> %b, i64 0
   ret i1 %c

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-emergency-slot.mir b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-emergency-slot.mir
index 5fbfbc9f37ce5..0cf2486308c4e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-emergency-slot.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-emergency-slot.mir
@@ -6,7 +6,7 @@
   target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n64-S128"
   target triple = "riscv64"
 
-  define weak_odr dso_local void @fixedlen_vector_spillslot(i8* %ay) nounwind {
+  define weak_odr dso_local void @fixedlen_vector_spillslot(ptr %ay) nounwind {
   ; CHECK-LABEL: fixedlen_vector_spillslot:
   ; CHECK:       # %bb.0: # %entry
   ; CHECK-NEXT:    addi sp, sp, -48

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
index 924094c00fe7b..57b219343c3e3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
@@ -5,7 +5,7 @@
 
 ; Tests that a floating-point build_vector doesn't try and generate a VID
 ; instruction
-define void @buildvec_no_vid_v4f32(<4 x float>* %x) {
+define void @buildvec_no_vid_v4f32(ptr %x) {
 ; CHECK-LABEL: buildvec_no_vid_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI0_0)
@@ -14,7 +14,7 @@ define void @buildvec_no_vid_v4f32(<4 x float>* %x) {
 ; CHECK-NEXT:    vle32.v v8, (a1)
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
-  store <4 x float> <float 0.0, float 4.0, float 0.0, float 2.0>, <4 x float>* %x
+  store <4 x float> <float 0.0, float 4.0, float 0.0, float 2.0>, ptr %x
   ret void
 }
 
@@ -50,7 +50,7 @@ define <4 x float> @hang_when_merging_stores_after_legalization(<8 x float> %x,
   ret <4 x float> %z
 }
 
-define void @buildvec_dominant0_v2f32(<2 x float>* %x) {
+define void @buildvec_dominant0_v2f32(ptr %x) {
 ; CHECK-LABEL: buildvec_dominant0_v2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -58,14 +58,14 @@ define void @buildvec_dominant0_v2f32(<2 x float>* %x) {
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
-  store <2 x float> <float 0.0, float 1.0>, <2 x float>* %x
+  store <2 x float> <float 0.0, float 1.0>, ptr %x
   ret void
 }
 
 ; We don't want to lower this to the insertion of two scalar elements as above,
 ; as each would require their own load from the constant pool.
 
-define void @buildvec_dominant1_v2f32(<2 x float>* %x) {
+define void @buildvec_dominant1_v2f32(ptr %x) {
 ; CHECK-LABEL: buildvec_dominant1_v2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -74,11 +74,11 @@ define void @buildvec_dominant1_v2f32(<2 x float>* %x) {
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
-  store <2 x float> <float 1.0, float 2.0>, <2 x float>* %x
+  store <2 x float> <float 1.0, float 2.0>, ptr %x
   ret void
 }
 
-define void @buildvec_dominant0_v4f32(<4 x float>* %x) {
+define void @buildvec_dominant0_v4f32(ptr %x) {
 ; CHECK-LABEL: buildvec_dominant0_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, 262144
@@ -90,11 +90,11 @@ define void @buildvec_dominant0_v4f32(<4 x float>* %x) {
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
-  store <4 x float> <float 2.0, float 2.0, float 0.0, float 2.0>, <4 x float>* %x
+  store <4 x float> <float 2.0, float 2.0, float 0.0, float 2.0>, ptr %x
   ret void
 }
 
-define void @buildvec_dominant1_v4f32(<4 x float>* %x, float %f) {
+define void @buildvec_dominant1_v4f32(ptr %x, float %f) {
 ; CHECK-LABEL: buildvec_dominant1_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -109,11 +109,11 @@ define void @buildvec_dominant1_v4f32(<4 x float>* %x, float %f) {
   %v1 = insertelement <4 x float> %v0, float 0.0, i32 1
   %v2 = insertelement <4 x float> %v1, float %f, i32 2
   %v3 = insertelement <4 x float> %v2, float %f, i32 3
-  store <4 x float> %v3, <4 x float>* %x
+  store <4 x float> %v3, ptr %x
   ret void
 }
 
-define void @buildvec_dominant2_v4f32(<4 x float>* %x, float %f) {
+define void @buildvec_dominant2_v4f32(ptr %x, float %f) {
 ; CHECK-LABEL: buildvec_dominant2_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, 262144
@@ -129,11 +129,11 @@ define void @buildvec_dominant2_v4f32(<4 x float>* %x, float %f) {
   %v1 = insertelement <4 x float> %v0, float 2.0, i32 1
   %v2 = insertelement <4 x float> %v1, float %f, i32 2
   %v3 = insertelement <4 x float> %v2, float %f, i32 3
-  store <4 x float> %v3, <4 x float>* %x
+  store <4 x float> %v3, ptr %x
   ret void
 }
 
-define void @buildvec_merge0_v4f32(<4 x float>* %x, float %f) {
+define void @buildvec_merge0_v4f32(ptr %x, float %f) {
 ; CHECK-LABEL: buildvec_merge0_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -147,7 +147,7 @@ define void @buildvec_merge0_v4f32(<4 x float>* %x, float %f) {
   %v1 = insertelement <4 x float> %v0, float 2.0, i32 1
   %v2 = insertelement <4 x float> %v1, float 2.0, i32 2
   %v3 = insertelement <4 x float> %v2, float %f, i32 3
-  store <4 x float> %v3, <4 x float>* %x
+  store <4 x float> %v3, ptr %x
   ret void
 }
 
@@ -205,7 +205,7 @@ define <8 x float> @splat_idx_v8f32(<8 x float> %v, i64 %idx) {
 }
 
 ; Test that we pull the vlse of the constant pool out of the loop.
-define dso_local void @splat_load_licm(float* %0) {
+define dso_local void @splat_load_licm(ptr %0) {
 ; CHECK-LABEL: splat_load_licm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, 1
@@ -223,9 +223,9 @@ define dso_local void @splat_load_licm(float* %0) {
 
 2:                                                ; preds = %2, %1
   %3 = phi i32 [ 0, %1 ], [ %6, %2 ]
-  %4 = getelementptr inbounds float, float* %0, i32 %3
-  %5 = bitcast float* %4 to <4 x float>*
-  store <4 x float> <float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00>, <4 x float>* %5, align 4
+  %4 = getelementptr inbounds float, ptr %0, i32 %3
+  %5 = bitcast ptr %4 to ptr
+  store <4 x float> <float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00>, ptr %5, align 4
   %6 = add nuw i32 %3, 4
   %7 = icmp eq i32 %6, 1024
   br i1 %7, label %8, label %2

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
index a77c49c942561..5d045877e5229 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
@@ -503,7 +503,7 @@ define <vscale x 8 x i1> @insert_nxv8i1_v8i1_16(<vscale x 8 x i1> %v, ptr %svp)
 
 declare <vscale x 16 x i64> @llvm.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64>, <2 x i64>, i64)
 
-define void @insert_v2i64_nxv16i64(ptr %psv0, ptr %psv1, <vscale x 16 x i64>* %out) {
+define void @insert_v2i64_nxv16i64(ptr %psv0, ptr %psv1, ptr %out) {
 ; CHECK-LABEL: insert_v2i64_nxv16i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
@@ -517,11 +517,11 @@ define void @insert_v2i64_nxv16i64(ptr %psv0, ptr %psv1, <vscale x 16 x i64>* %o
   %sv1 = load <2 x i64>, ptr %psv1
   %v0 = call <vscale x 16 x i64> @llvm.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> undef, <2 x i64> %sv0, i64 0)
   %v = call <vscale x 16 x i64> @llvm.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> %v0, <2 x i64> %sv1, i64 4)
-  store <vscale x 16 x i64> %v, <vscale x 16 x i64>* %out
+  store <vscale x 16 x i64> %v, ptr %out
   ret void
 }
 
-define void @insert_v2i64_nxv16i64_lo0(ptr %psv, <vscale x 16 x i64>* %out) {
+define void @insert_v2i64_nxv16i64_lo0(ptr %psv, ptr %out) {
 ; CHECK-LABEL: insert_v2i64_nxv16i64_lo0:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
@@ -530,11 +530,11 @@ define void @insert_v2i64_nxv16i64_lo0(ptr %psv, <vscale x 16 x i64>* %out) {
 ; CHECK-NEXT:    ret
   %sv = load <2 x i64>, ptr %psv
   %v = call <vscale x 16 x i64> @llvm.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> undef, <2 x i64> %sv, i64 0)
-  store <vscale x 16 x i64> %v, <vscale x 16 x i64>* %out
+  store <vscale x 16 x i64> %v, ptr %out
   ret void
 }
 
-define void @insert_v2i64_nxv16i64_lo2(ptr %psv, <vscale x 16 x i64>* %out) {
+define void @insert_v2i64_nxv16i64_lo2(ptr %psv, ptr %out) {
 ; CHECK-LABEL: insert_v2i64_nxv16i64_lo2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
@@ -545,13 +545,13 @@ define void @insert_v2i64_nxv16i64_lo2(ptr %psv, <vscale x 16 x i64>* %out) {
 ; CHECK-NEXT:    ret
   %sv = load <2 x i64>, ptr %psv
   %v = call <vscale x 16 x i64> @llvm.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> undef, <2 x i64> %sv, i64 2)
-  store <vscale x 16 x i64> %v, <vscale x 16 x i64>* %out
+  store <vscale x 16 x i64> %v, ptr %out
   ret void
 }
 
 ; Check we don't mistakenly optimize this: we don't know whether this is
 ; inserted into the low or high split vector.
-define void @insert_v2i64_nxv16i64_hi(ptr %psv, <vscale x 16 x i64>* %out) {
+define void @insert_v2i64_nxv16i64_hi(ptr %psv, ptr %out) {
 ; RV32-LABEL: insert_v2i64_nxv16i64_hi:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    addi sp, sp, -80
@@ -619,7 +619,7 @@ define void @insert_v2i64_nxv16i64_hi(ptr %psv, <vscale x 16 x i64>* %out) {
 ; RV64-NEXT:    ret
   %sv = load <2 x i64>, ptr %psv
   %v = call <vscale x 16 x i64> @llvm.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> undef, <2 x i64> %sv, i64 8)
-  store <vscale x 16 x i64> %v, <vscale x 16 x i64>* %out
+  store <vscale x 16 x i64> %v, ptr %out
   ret void
 }
 

diff --git a/llvm/test/CodeGen/RISCV/rvv/frameindex-addr.ll b/llvm/test/CodeGen/RISCV/rvv/frameindex-addr.ll
index ea1e65d5fe829..5c592dd1a2d68 100644
--- a/llvm/test/CodeGen/RISCV/rvv/frameindex-addr.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/frameindex-addr.ll
@@ -7,7 +7,7 @@
 
 declare void @llvm.riscv.vse.nxv1i64(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   i64);
 
 define i64 @test(<vscale x 1 x i64> %0) nounwind {
@@ -23,11 +23,11 @@ define i64 @test(<vscale x 1 x i64> %0) nounwind {
   ; CHECK-NEXT:   PseudoRET implicit $x10
 entry:
   %a = alloca i64
-  %b = bitcast i64* %a to <vscale x 1 x i64>*
+  %b = bitcast ptr %a to ptr
   call void @llvm.riscv.vse.nxv1i64(
     <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %b,
+    ptr %b,
     i64 1)
-  %c = load i64, i64* %a
+  %c = load i64, ptr %a
   ret i64 %c
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/implicit-def-copy.ll b/llvm/test/CodeGen/RISCV/rvv/implicit-def-copy.ll
index 1c48fcf8865a7..292f1deb2cce8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/implicit-def-copy.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/implicit-def-copy.ll
@@ -3,7 +3,7 @@
 
 ; Make sure we don't create a COPY instruction for IMPLICIT_DEF.
 
-define <vscale x 8 x i64> @vpload_nxv8i64(<vscale x 8 x i64>* %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) #1 {
+define <vscale x 8 x i64> @vpload_nxv8i64(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) #1 {
   ; CHECK-LABEL: name: vpload_nxv8i64
   ; CHECK: bb.0 (%ir-block.0):
   ; CHECK-NEXT:   liveins: $x10, $v0, $x11
@@ -15,8 +15,8 @@ define <vscale x 8 x i64> @vpload_nxv8i64(<vscale x 8 x i64>* %ptr, <vscale x 8
   ; CHECK-NEXT:   [[PseudoVLE64_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64_V_M8_MASK $noreg, [[COPY2]], $v0, [[COPY]], 6 /* e64 */, 1 /* ta, mu */ :: (load unknown-size from %ir.ptr, align 64)
   ; CHECK-NEXT:   $v8m8 = COPY [[PseudoVLE64_V_M8_MASK]]
   ; CHECK-NEXT:   PseudoRET implicit $v8m8
-  %load = call <vscale x 8 x i64> @llvm.vp.load.nxv8i64.p0(<vscale x 8 x i64>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
+  %load = call <vscale x 8 x i64> @llvm.vp.load.nxv8i64.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i64> %load
 }
 
-declare <vscale x 8 x i64> @llvm.vp.load.nxv8i64.p0(<vscale x 8 x i64>*, <vscale x 8 x i1>, i32)
+declare <vscale x 8 x i64> @llvm.vp.load.nxv8i64.p0(ptr, <vscale x 8 x i1>, i32)

diff --git a/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll
index 8a368e7161c3f..0f3f57a0dec59 100644
--- a/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll
@@ -457,7 +457,7 @@ define <vscale x 4 x i1> @insert_nxv4i1_nxv1i1_2(<vscale x 4 x i1> %v, <vscale x
 
 declare <vscale x 16 x i64> @llvm.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64>, <vscale x 8 x i64>, i64)
 
-define void @insert_nxv8i64_nxv16i64(<vscale x 8 x i64> %sv0, <vscale x 8 x i64> %sv1, <vscale x 16 x i64>* %out) {
+define void @insert_nxv8i64_nxv16i64(<vscale x 8 x i64> %sv0, <vscale x 8 x i64> %sv1, ptr %out) {
 ; CHECK-LABEL: insert_nxv8i64_nxv16i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vs8r.v v8, (a0)
@@ -468,21 +468,21 @@ define void @insert_nxv8i64_nxv16i64(<vscale x 8 x i64> %sv0, <vscale x 8 x i64>
 ; CHECK-NEXT:    ret
   %v0 = call <vscale x 16 x i64> @llvm.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> undef, <vscale x 8 x i64> %sv0, i64 0)
   %v = call <vscale x 16 x i64> @llvm.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> %v0, <vscale x 8 x i64> %sv1, i64 8)
-  store <vscale x 16 x i64> %v, <vscale x 16 x i64>* %out
+  store <vscale x 16 x i64> %v, ptr %out
   ret void
 }
 
-define void @insert_nxv8i64_nxv16i64_lo(<vscale x 8 x i64> %sv0, <vscale x 16 x i64>* %out) {
+define void @insert_nxv8i64_nxv16i64_lo(<vscale x 8 x i64> %sv0, ptr %out) {
 ; CHECK-LABEL: insert_nxv8i64_nxv16i64_lo:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vs8r.v v8, (a0)
 ; CHECK-NEXT:    ret
   %v = call <vscale x 16 x i64> @llvm.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> undef, <vscale x 8 x i64> %sv0, i64 0)
-  store <vscale x 16 x i64> %v, <vscale x 16 x i64>* %out
+  store <vscale x 16 x i64> %v, ptr %out
   ret void
 }
 
-define void @insert_nxv8i64_nxv16i64_hi(<vscale x 8 x i64> %sv0, <vscale x 16 x i64>* %out) {
+define void @insert_nxv8i64_nxv16i64_hi(<vscale x 8 x i64> %sv0, ptr %out) {
 ; CHECK-LABEL: insert_nxv8i64_nxv16i64_hi:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a1, vlenb
@@ -491,7 +491,7 @@ define void @insert_nxv8i64_nxv16i64_hi(<vscale x 8 x i64> %sv0, <vscale x 16 x
 ; CHECK-NEXT:    vs8r.v v8, (a0)
 ; CHECK-NEXT:    ret
   %v = call <vscale x 16 x i64> @llvm.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> undef, <vscale x 8 x i64> %sv0, i64 8)
-  store <vscale x 16 x i64> %v, <vscale x 16 x i64>* %out
+  store <vscale x 16 x i64> %v, ptr %out
   ret void
 }
 

diff --git a/llvm/test/CodeGen/RISCV/rvv/legalize-load-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/legalize-load-sdnode.ll
index 0aa9cc3b68995..eada90e055df9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/legalize-load-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/legalize-load-sdnode.ll
@@ -4,7 +4,7 @@
 
 ; Check that we are able to legalize scalable-vector loads that require widening.
 
-define <vscale x 3 x i8> @load_nxv3i8(<vscale x 3 x i8>* %ptr) {
+define <vscale x 3 x i8> @load_nxv3i8(ptr %ptr) {
 ; CHECK-LABEL: load_nxv3i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a1, vlenb
@@ -14,11 +14,11 @@ define <vscale x 3 x i8> @load_nxv3i8(<vscale x 3 x i8>* %ptr) {
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %v = load <vscale x 3 x i8>, <vscale x 3 x i8>* %ptr
+  %v = load <vscale x 3 x i8>, ptr %ptr
   ret <vscale x 3 x i8> %v
 }
 
-define <vscale x 5 x half> @load_nxv5f16(<vscale x 5 x half>* %ptr) {
+define <vscale x 5 x half> @load_nxv5f16(ptr %ptr) {
 ; CHECK-LABEL: load_nxv5f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a1, vlenb
@@ -28,11 +28,11 @@ define <vscale x 5 x half> @load_nxv5f16(<vscale x 5 x half>* %ptr) {
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %v = load <vscale x 5 x half>, <vscale x 5 x half>* %ptr
+  %v = load <vscale x 5 x half>, ptr %ptr
   ret <vscale x 5 x half> %v
 }
 
-define <vscale x 7 x half> @load_nxv7f16(<vscale x 7 x half>* %ptr, <vscale x 7 x half>* %out) {
+define <vscale x 7 x half> @load_nxv7f16(ptr %ptr, ptr %out) {
 ; CHECK-LABEL: load_nxv7f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a2, vlenb
@@ -42,7 +42,7 @@ define <vscale x 7 x half> @load_nxv7f16(<vscale x 7 x half>* %ptr, <vscale x 7
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vse16.v v8, (a1)
 ; CHECK-NEXT:    ret
-  %v = load <vscale x 7 x half>, <vscale x 7 x half>* %ptr
-  store <vscale x 7 x half> %v, <vscale x 7 x half>* %out
+  %v = load <vscale x 7 x half>, ptr %ptr
+  store <vscale x 7 x half> %v, ptr %out
   ret <vscale x 7 x half> %v
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/legalize-store-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/legalize-store-sdnode.ll
index 5121c0bf953ce..4b4cffc461d46 100644
--- a/llvm/test/CodeGen/RISCV/rvv/legalize-store-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/legalize-store-sdnode.ll
@@ -4,7 +4,7 @@
 
 ; Check that we are able to legalize scalable-vector stores that require widening.
 
-define void @store_nxv3i8(<vscale x 3 x i8> %val, <vscale x 3 x i8>* %ptr) {
+define void @store_nxv3i8(<vscale x 3 x i8> %val, ptr %ptr) {
 ; CHECK-LABEL: store_nxv3i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a1, vlenb
@@ -14,11 +14,11 @@ define void @store_nxv3i8(<vscale x 3 x i8> %val, <vscale x 3 x i8>* %ptr) {
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
-  store <vscale x 3 x i8> %val, <vscale x 3 x i8>* %ptr
+  store <vscale x 3 x i8> %val, ptr %ptr
   ret void
 }
 
-define void @store_nxv7f64(<vscale x 7 x double> %val, <vscale x 7 x double>* %ptr) {
+define void @store_nxv7f64(<vscale x 7 x double> %val, ptr %ptr) {
 ; CHECK-LABEL: store_nxv7f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a1, vlenb
@@ -27,6 +27,6 @@ define void @store_nxv7f64(<vscale x 7 x double> %val, <vscale x 7 x double>* %p
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
-  store <vscale x 7 x double> %val, <vscale x 7 x double>* %ptr
+  store <vscale x 7 x double> %val, ptr %ptr
   ret void
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/load-mask.ll b/llvm/test/CodeGen/RISCV/rvv/load-mask.ll
index 86f7f6de53bed..97ef0cfc9f1e2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/load-mask.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/load-mask.ll
@@ -4,86 +4,86 @@
 ; RUN: llc -mtriple riscv64 -mattr=+v %s -o - \
 ; RUN:     -verify-machineinstrs | FileCheck %s
 
-define void @test_load_mask_64(<vscale x 64 x i1>* %pa, <vscale x 64 x i1>* %pb) {
+define void @test_load_mask_64(ptr %pa, ptr %pb) {
 ; CHECK-LABEL: test_load_mask_64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a2, zero, e8, m8, ta, ma
 ; CHECK-NEXT:    vlm.v v8, (a0)
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
-  %a = load <vscale x 64 x i1>, <vscale x 64 x i1>* %pa
-  store <vscale x 64 x i1> %a, <vscale x 64 x i1>* %pb
+  %a = load <vscale x 64 x i1>, ptr %pa
+  store <vscale x 64 x i1> %a, ptr %pb
   ret void
 }
 
-define void @test_load_mask_32(<vscale x 32 x i1>* %pa, <vscale x 32 x i1>* %pb) {
+define void @test_load_mask_32(ptr %pa, ptr %pb) {
 ; CHECK-LABEL: test_load_mask_32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a2, zero, e8, m4, ta, ma
 ; CHECK-NEXT:    vlm.v v8, (a0)
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
-  %a = load <vscale x 32 x i1>, <vscale x 32 x i1>* %pa
-  store <vscale x 32 x i1> %a, <vscale x 32 x i1>* %pb
+  %a = load <vscale x 32 x i1>, ptr %pa
+  store <vscale x 32 x i1> %a, ptr %pb
   ret void
 }
 
-define void @test_load_mask_16(<vscale x 16 x i1>* %pa, <vscale x 16 x i1>* %pb) {
+define void @test_load_mask_16(ptr %pa, ptr %pb) {
 ; CHECK-LABEL: test_load_mask_16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a2, zero, e8, m2, ta, ma
 ; CHECK-NEXT:    vlm.v v8, (a0)
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
-  %a = load <vscale x 16 x i1>, <vscale x 16 x i1>* %pa
-  store <vscale x 16 x i1> %a, <vscale x 16 x i1>* %pb
+  %a = load <vscale x 16 x i1>, ptr %pa
+  store <vscale x 16 x i1> %a, ptr %pb
   ret void
 }
 
-define void @test_load_mask_8(<vscale x 8 x i1>* %pa, <vscale x 8 x i1>* %pb) {
+define void @test_load_mask_8(ptr %pa, ptr %pb) {
 ; CHECK-LABEL: test_load_mask_8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vlm.v v8, (a0)
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
-  %a = load <vscale x 8 x i1>, <vscale x 8 x i1>* %pa
-  store <vscale x 8 x i1> %a, <vscale x 8 x i1>* %pb
+  %a = load <vscale x 8 x i1>, ptr %pa
+  store <vscale x 8 x i1> %a, ptr %pb
   ret void
 }
 
-define void @test_load_mask_4(<vscale x 4 x i1>* %pa, <vscale x 4 x i1>* %pb) {
+define void @test_load_mask_4(ptr %pa, ptr %pb) {
 ; CHECK-LABEL: test_load_mask_4:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a2, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vlm.v v8, (a0)
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
-  %a = load <vscale x 4 x i1>, <vscale x 4 x i1>* %pa
-  store <vscale x 4 x i1> %a, <vscale x 4 x i1>* %pb
+  %a = load <vscale x 4 x i1>, ptr %pa
+  store <vscale x 4 x i1> %a, ptr %pb
   ret void
 }
 
-define void @test_load_mask_2(<vscale x 2 x i1>* %pa, <vscale x 2 x i1>* %pb) {
+define void @test_load_mask_2(ptr %pa, ptr %pb) {
 ; CHECK-LABEL: test_load_mask_2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a2, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vlm.v v8, (a0)
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
-  %a = load <vscale x 2 x i1>, <vscale x 2 x i1>* %pa
-  store <vscale x 2 x i1> %a, <vscale x 2 x i1>* %pb
+  %a = load <vscale x 2 x i1>, ptr %pa
+  store <vscale x 2 x i1> %a, ptr %pb
   ret void
 }
 
-define void @test_load_mask_1(<vscale x 1 x i1>* %pa, <vscale x 1 x i1>* %pb) {
+define void @test_load_mask_1(ptr %pa, ptr %pb) {
 ; CHECK-LABEL: test_load_mask_1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a2, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vlm.v v8, (a0)
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
-  %a = load <vscale x 1 x i1>, <vscale x 1 x i1>* %pa
-  store <vscale x 1 x i1> %a, <vscale x 1 x i1>* %pb
+  %a = load <vscale x 1 x i1>, ptr %pa
+  store <vscale x 1 x i1> %a, ptr %pb
   ret void
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/localvar.ll b/llvm/test/CodeGen/RISCV/rvv/localvar.ll
index 1ee88f897b6eb..90bf29d776011 100644
--- a/llvm/test/CodeGen/RISCV/rvv/localvar.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/localvar.ll
@@ -25,8 +25,8 @@ define void @local_var_mf8() {
 ; RV64IV-NEXT:    ret
   %local0 = alloca <vscale x 1 x i8>
   %local1 = alloca <vscale x 1 x i8>
-  load volatile <vscale x 1 x i8>, <vscale x 1 x i8>* %local0
-  load volatile <vscale x 1 x i8>, <vscale x 1 x i8>* %local1
+  load volatile <vscale x 1 x i8>, ptr %local0
+  load volatile <vscale x 1 x i8>, ptr %local1
   ret void
 }
 
@@ -52,8 +52,8 @@ define void @local_var_m1() {
 ; RV64IV-NEXT:    ret
   %local0 = alloca <vscale x 8 x i8>
   %local1 = alloca <vscale x 8 x i8>
-  load volatile <vscale x 8 x i8>, <vscale x 8 x i8>* %local0
-  load volatile <vscale x 8 x i8>, <vscale x 8 x i8>* %local1
+  load volatile <vscale x 8 x i8>, ptr %local0
+  load volatile <vscale x 8 x i8>, ptr %local1
   ret void
 }
 
@@ -80,8 +80,8 @@ define void @local_var_m2() {
 ; RV64IV-NEXT:    ret
   %local0 = alloca <vscale x 16 x i8>
   %local1 = alloca <vscale x 16 x i8>
-  load volatile <vscale x 16 x i8>, <vscale x 16 x i8>* %local0
-  load volatile <vscale x 16 x i8>, <vscale x 16 x i8>* %local1
+  load volatile <vscale x 16 x i8>, ptr %local0
+  load volatile <vscale x 16 x i8>, ptr %local1
   ret void
 }
 
@@ -114,8 +114,8 @@ define void @local_var_m4() {
 ; RV64IV-NEXT:    ret
   %local0 = alloca <vscale x 32 x i8>
   %local1 = alloca <vscale x 32 x i8>
-  load volatile <vscale x 32 x i8>, <vscale x 32 x i8>* %local0
-  load volatile <vscale x 32 x i8>, <vscale x 32 x i8>* %local1
+  load volatile <vscale x 32 x i8>, ptr %local0
+  load volatile <vscale x 32 x i8>, ptr %local1
   ret void
 }
 
@@ -148,8 +148,8 @@ define void @local_var_m8() {
 ; RV64IV-NEXT:    ret
   %local0 = alloca <vscale x 64 x i8>
   %local1 = alloca <vscale x 64 x i8>
-  load volatile <vscale x 64 x i8>, <vscale x 64 x i8>* %local0
-  load volatile <vscale x 64 x i8>, <vscale x 64 x i8>* %local1
+  load volatile <vscale x 64 x i8>, ptr %local0
+  load volatile <vscale x 64 x i8>, ptr %local1
   ret void
 }
 
@@ -180,10 +180,10 @@ define void @local_var_m2_mix_local_scalar() {
   %local0 = alloca <vscale x 16 x i8>
   %local1 = alloca <vscale x 16 x i8>
   %local_scalar1 = alloca i32
-  load volatile i32, i32* %local_scalar0
-  load volatile <vscale x 16 x i8>, <vscale x 16 x i8>* %local0
-  load volatile <vscale x 16 x i8>, <vscale x 16 x i8>* %local1
-  load volatile i32, i32* %local_scalar1
+  load volatile i32, ptr %local_scalar0
+  load volatile <vscale x 16 x i8>, ptr %local0
+  load volatile <vscale x 16 x i8>, ptr %local1
+  load volatile i32, ptr %local_scalar1
   ret void
 }
 
@@ -231,9 +231,9 @@ define void @local_var_m2_with_varsize_object(i64 %n) {
   %1 = alloca i8, i64 %n
   %2 = alloca <vscale x 16 x i8>
   %3 = alloca <vscale x 16 x i8>
-  call void @notdead(i8* %1, <vscale x 16 x i8>* %2)
-  load volatile <vscale x 16 x i8>, <vscale x 16 x i8>* %2
-  load volatile <vscale x 16 x i8>, <vscale x 16 x i8>* %3
+  call void @notdead(ptr %1, ptr %2)
+  load volatile <vscale x 16 x i8>, ptr %2
+  load volatile <vscale x 16 x i8>, ptr %3
   ret void
 }
 
@@ -289,11 +289,11 @@ define void @local_var_m2_with_bp(i64 %n) {
   %local0 = alloca <vscale x 16 x i8>
   %local1 = alloca <vscale x 16 x i8>
   %local_scalar1 = alloca i32
-  call void @notdead2(i8* %1, i32* %2, <vscale x 16 x i8>* %local0)
-  load volatile i32, i32* %local_scalar0
-  load volatile <vscale x 16 x i8>, <vscale x 16 x i8>* %local0
-  load volatile <vscale x 16 x i8>, <vscale x 16 x i8>* %local1
-  load volatile i32, i32* %local_scalar1
+  call void @notdead2(ptr %1, ptr %2, ptr %local0)
+  load volatile i32, ptr %local_scalar0
+  load volatile <vscale x 16 x i8>, ptr %local0
+  load volatile <vscale x 16 x i8>, ptr %local1
+  load volatile i32, ptr %local_scalar1
   ret void
 }
 
@@ -318,5 +318,5 @@ define i64 @fixed_object(i64 %0, i64 %1, i64 %2, i64 %3, i64 %4, i64 %5, i64 %6,
   ret i64 %8
 }
 
-declare void @notdead(i8*, <vscale x 16 x i8>*)
-declare void @notdead2(i8*, i32*, <vscale x 16 x i8>*)
+declare void @notdead(ptr, ptr)
+declare void @notdead2(ptr, ptr, ptr)

diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-load-fp.ll b/llvm/test/CodeGen/RISCV/rvv/masked-load-fp.ll
index 8c4064e7ed630..df1bd889c1042 100644
--- a/llvm/test/CodeGen/RISCV/rvv/masked-load-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-load-fp.ll
@@ -2,167 +2,167 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s
 
-define <vscale x 1 x half> @masked_load_nxv1f16(<vscale x 1 x half>* %a, <vscale x 1 x i1> %mask) nounwind {
+define <vscale x 1 x half> @masked_load_nxv1f16(ptr %a, <vscale x 1 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 1 x half> @llvm.masked.load.nxv1f16(<vscale x 1 x half>* %a, i32 2, <vscale x 1 x i1> %mask, <vscale x 1 x half> undef)
+  %load = call <vscale x 1 x half> @llvm.masked.load.nxv1f16(ptr %a, i32 2, <vscale x 1 x i1> %mask, <vscale x 1 x half> undef)
   ret <vscale x 1 x half> %load
 }
-declare <vscale x 1 x half> @llvm.masked.load.nxv1f16(<vscale x 1 x half>*, i32, <vscale x 1 x i1>, <vscale x 1 x half>)
+declare <vscale x 1 x half> @llvm.masked.load.nxv1f16(ptr, i32, <vscale x 1 x i1>, <vscale x 1 x half>)
 
-define <vscale x 1 x float> @masked_load_nxv1f32(<vscale x 1 x float>* %a, <vscale x 1 x i1> %mask) nounwind {
+define <vscale x 1 x float> @masked_load_nxv1f32(ptr %a, <vscale x 1 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 1 x float> @llvm.masked.load.nxv1f32(<vscale x 1 x float>* %a, i32 4, <vscale x 1 x i1> %mask, <vscale x 1 x float> undef)
+  %load = call <vscale x 1 x float> @llvm.masked.load.nxv1f32(ptr %a, i32 4, <vscale x 1 x i1> %mask, <vscale x 1 x float> undef)
   ret <vscale x 1 x float> %load
 }
-declare <vscale x 1 x float> @llvm.masked.load.nxv1f32(<vscale x 1 x float>*, i32, <vscale x 1 x i1>, <vscale x 1 x float>)
+declare <vscale x 1 x float> @llvm.masked.load.nxv1f32(ptr, i32, <vscale x 1 x i1>, <vscale x 1 x float>)
 
-define <vscale x 1 x double> @masked_load_nxv1f64(<vscale x 1 x double>* %a, <vscale x 1 x i1> %mask) nounwind {
+define <vscale x 1 x double> @masked_load_nxv1f64(ptr %a, <vscale x 1 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 1 x double> @llvm.masked.load.nxv1f64(<vscale x 1 x double>* %a, i32 8, <vscale x 1 x i1> %mask, <vscale x 1 x double> undef)
+  %load = call <vscale x 1 x double> @llvm.masked.load.nxv1f64(ptr %a, i32 8, <vscale x 1 x i1> %mask, <vscale x 1 x double> undef)
   ret <vscale x 1 x double> %load
 }
-declare <vscale x 1 x double> @llvm.masked.load.nxv1f64(<vscale x 1 x double>*, i32, <vscale x 1 x i1>, <vscale x 1 x double>)
+declare <vscale x 1 x double> @llvm.masked.load.nxv1f64(ptr, i32, <vscale x 1 x i1>, <vscale x 1 x double>)
 
-define <vscale x 2 x half> @masked_load_nxv2f16(<vscale x 2 x half>* %a, <vscale x 2 x i1> %mask) nounwind {
+define <vscale x 2 x half> @masked_load_nxv2f16(ptr %a, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x half> @llvm.masked.load.nxv2f16(<vscale x 2 x half>* %a, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> undef)
+  %load = call <vscale x 2 x half> @llvm.masked.load.nxv2f16(ptr %a, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> undef)
   ret <vscale x 2 x half> %load
 }
-declare <vscale x 2 x half> @llvm.masked.load.nxv2f16(<vscale x 2 x half>*, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
+declare <vscale x 2 x half> @llvm.masked.load.nxv2f16(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
 
-define <vscale x 2 x float> @masked_load_nxv2f32(<vscale x 2 x float>* %a, <vscale x 2 x i1> %mask) nounwind {
+define <vscale x 2 x float> @masked_load_nxv2f32(ptr %a, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x float> @llvm.masked.load.nxv2f32(<vscale x 2 x float>* %a, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
+  %load = call <vscale x 2 x float> @llvm.masked.load.nxv2f32(ptr %a, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
   ret <vscale x 2 x float> %load
 }
-declare <vscale x 2 x float> @llvm.masked.load.nxv2f32(<vscale x 2 x float>*, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
+declare <vscale x 2 x float> @llvm.masked.load.nxv2f32(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
 
-define <vscale x 2 x double> @masked_load_nxv2f64(<vscale x 2 x double>* %a, <vscale x 2 x i1> %mask) nounwind {
+define <vscale x 2 x double> @masked_load_nxv2f64(ptr %a, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x double> @llvm.masked.load.nxv2f64(<vscale x 2 x double>* %a, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> undef)
+  %load = call <vscale x 2 x double> @llvm.masked.load.nxv2f64(ptr %a, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> undef)
   ret <vscale x 2 x double> %load
 }
-declare <vscale x 2 x double> @llvm.masked.load.nxv2f64(<vscale x 2 x double>*, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
+declare <vscale x 2 x double> @llvm.masked.load.nxv2f64(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
 
-define <vscale x 4 x half> @masked_load_nxv4f16(<vscale x 4 x half>* %a, <vscale x 4 x i1> %mask) nounwind {
+define <vscale x 4 x half> @masked_load_nxv4f16(ptr %a, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 4 x half> @llvm.masked.load.nxv4f16(<vscale x 4 x half>* %a, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x half> undef)
+  %load = call <vscale x 4 x half> @llvm.masked.load.nxv4f16(ptr %a, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x half> undef)
   ret <vscale x 4 x half> %load
 }
-declare <vscale x 4 x half> @llvm.masked.load.nxv4f16(<vscale x 4 x half>*, i32, <vscale x 4 x i1>, <vscale x 4 x half>)
+declare <vscale x 4 x half> @llvm.masked.load.nxv4f16(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x half>)
 
-define <vscale x 4 x float> @masked_load_nxv4f32(<vscale x 4 x float>* %a, <vscale x 4 x i1> %mask) nounwind {
+define <vscale x 4 x float> @masked_load_nxv4f32(ptr %a, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 4 x float> @llvm.masked.load.nxv4f32(<vscale x 4 x float>* %a, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x float> undef)
+  %load = call <vscale x 4 x float> @llvm.masked.load.nxv4f32(ptr %a, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x float> undef)
   ret <vscale x 4 x float> %load
 }
-declare <vscale x 4 x float> @llvm.masked.load.nxv4f32(<vscale x 4 x float>*, i32, <vscale x 4 x i1>, <vscale x 4 x float>)
+declare <vscale x 4 x float> @llvm.masked.load.nxv4f32(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x float>)
 
-define <vscale x 4 x double> @masked_load_nxv4f64(<vscale x 4 x double>* %a, <vscale x 4 x i1> %mask) nounwind {
+define <vscale x 4 x double> @masked_load_nxv4f64(ptr %a, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 4 x double> @llvm.masked.load.nxv4f64(<vscale x 4 x double>* %a, i32 8, <vscale x 4 x i1> %mask, <vscale x 4 x double> undef)
+  %load = call <vscale x 4 x double> @llvm.masked.load.nxv4f64(ptr %a, i32 8, <vscale x 4 x i1> %mask, <vscale x 4 x double> undef)
   ret <vscale x 4 x double> %load
 }
-declare <vscale x 4 x double> @llvm.masked.load.nxv4f64(<vscale x 4 x double>*, i32, <vscale x 4 x i1>, <vscale x 4 x double>)
+declare <vscale x 4 x double> @llvm.masked.load.nxv4f64(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x double>)
 
-define <vscale x 8 x half> @masked_load_nxv8f16(<vscale x 8 x half>* %a, <vscale x 8 x i1> %mask) nounwind {
+define <vscale x 8 x half> @masked_load_nxv8f16(ptr %a, <vscale x 8 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 8 x half> @llvm.masked.load.nxv8f16(<vscale x 8 x half>* %a, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x half> undef)
+  %load = call <vscale x 8 x half> @llvm.masked.load.nxv8f16(ptr %a, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x half> undef)
   ret <vscale x 8 x half> %load
 }
-declare <vscale x 8 x half> @llvm.masked.load.nxv8f16(<vscale x 8 x half>*, i32, <vscale x 8 x i1>, <vscale x 8 x half>)
+declare <vscale x 8 x half> @llvm.masked.load.nxv8f16(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x half>)
 
-define <vscale x 8 x float> @masked_load_nxv8f32(<vscale x 8 x float>* %a, <vscale x 8 x i1> %mask) nounwind {
+define <vscale x 8 x float> @masked_load_nxv8f32(ptr %a, <vscale x 8 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 8 x float> @llvm.masked.load.nxv8f32(<vscale x 8 x float>* %a, i32 4, <vscale x 8 x i1> %mask, <vscale x 8 x float> undef)
+  %load = call <vscale x 8 x float> @llvm.masked.load.nxv8f32(ptr %a, i32 4, <vscale x 8 x i1> %mask, <vscale x 8 x float> undef)
   ret <vscale x 8 x float> %load
 }
-declare <vscale x 8 x float> @llvm.masked.load.nxv8f32(<vscale x 8 x float>*, i32, <vscale x 8 x i1>, <vscale x 8 x float>)
+declare <vscale x 8 x float> @llvm.masked.load.nxv8f32(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x float>)
 
-define <vscale x 8 x double> @masked_load_nxv8f64(<vscale x 8 x double>* %a, <vscale x 8 x i1> %mask) nounwind {
+define <vscale x 8 x double> @masked_load_nxv8f64(ptr %a, <vscale x 8 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 8 x double> @llvm.masked.load.nxv8f64(<vscale x 8 x double>* %a, i32 8, <vscale x 8 x i1> %mask, <vscale x 8 x double> undef)
+  %load = call <vscale x 8 x double> @llvm.masked.load.nxv8f64(ptr %a, i32 8, <vscale x 8 x i1> %mask, <vscale x 8 x double> undef)
   ret <vscale x 8 x double> %load
 }
-declare <vscale x 8 x double> @llvm.masked.load.nxv8f64(<vscale x 8 x double>*, i32, <vscale x 8 x i1>, <vscale x 8 x double>)
+declare <vscale x 8 x double> @llvm.masked.load.nxv8f64(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x double>)
 
-define <vscale x 16 x half> @masked_load_nxv16f16(<vscale x 16 x half>* %a, <vscale x 16 x i1> %mask) nounwind {
+define <vscale x 16 x half> @masked_load_nxv16f16(ptr %a, <vscale x 16 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 16 x half> @llvm.masked.load.nxv16f16(<vscale x 16 x half>* %a, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x half> undef)
+  %load = call <vscale x 16 x half> @llvm.masked.load.nxv16f16(ptr %a, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x half> undef)
   ret <vscale x 16 x half> %load
 }
-declare <vscale x 16 x half> @llvm.masked.load.nxv16f16(<vscale x 16 x half>*, i32, <vscale x 16 x i1>, <vscale x 16 x half>)
+declare <vscale x 16 x half> @llvm.masked.load.nxv16f16(ptr, i32, <vscale x 16 x i1>, <vscale x 16 x half>)
 
-define <vscale x 16 x float> @masked_load_nxv16f32(<vscale x 16 x float>* %a, <vscale x 16 x i1> %mask) nounwind {
+define <vscale x 16 x float> @masked_load_nxv16f32(ptr %a, <vscale x 16 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 16 x float> @llvm.masked.load.nxv16f32(<vscale x 16 x float>* %a, i32 4, <vscale x 16 x i1> %mask, <vscale x 16 x float> undef)
+  %load = call <vscale x 16 x float> @llvm.masked.load.nxv16f32(ptr %a, i32 4, <vscale x 16 x i1> %mask, <vscale x 16 x float> undef)
   ret <vscale x 16 x float> %load
 }
-declare <vscale x 16 x float> @llvm.masked.load.nxv16f32(<vscale x 16 x float>*, i32, <vscale x 16 x i1>, <vscale x 16 x float>)
+declare <vscale x 16 x float> @llvm.masked.load.nxv16f32(ptr, i32, <vscale x 16 x i1>, <vscale x 16 x float>)
 
-define <vscale x 32 x half> @masked_load_nxv32f16(<vscale x 32 x half>* %a, <vscale x 32 x i1> %mask) nounwind {
+define <vscale x 32 x half> @masked_load_nxv32f16(ptr %a, <vscale x 32 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 32 x half> @llvm.masked.load.nxv32f16(<vscale x 32 x half>* %a, i32 2, <vscale x 32 x i1> %mask, <vscale x 32 x half> undef)
+  %load = call <vscale x 32 x half> @llvm.masked.load.nxv32f16(ptr %a, i32 2, <vscale x 32 x i1> %mask, <vscale x 32 x half> undef)
   ret <vscale x 32 x half> %load
 }
-declare <vscale x 32 x half> @llvm.masked.load.nxv32f16(<vscale x 32 x half>*, i32, <vscale x 32 x i1>, <vscale x 32 x half>)
+declare <vscale x 32 x half> @llvm.masked.load.nxv32f16(ptr, i32, <vscale x 32 x i1>, <vscale x 32 x half>)

diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-load-int.ll b/llvm/test/CodeGen/RISCV/rvv/masked-load-int.ll
index dde4185abb9c6..9e2a33f54f420 100644
--- a/llvm/test/CodeGen/RISCV/rvv/masked-load-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-load-int.ll
@@ -2,257 +2,257 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
 
-define <vscale x 1 x i8> @masked_load_nxv1i8(<vscale x 1 x i8>* %a, <vscale x 1 x i1> %mask) nounwind {
+define <vscale x 1 x i8> @masked_load_nxv1i8(ptr %a, <vscale x 1 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv1i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vle8.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 1 x i8> @llvm.masked.load.nxv1i8(<vscale x 1 x i8>* %a, i32 1, <vscale x 1 x i1> %mask, <vscale x 1 x i8> undef)
+  %load = call <vscale x 1 x i8> @llvm.masked.load.nxv1i8(ptr %a, i32 1, <vscale x 1 x i1> %mask, <vscale x 1 x i8> undef)
   ret <vscale x 1 x i8> %load
 }
-declare <vscale x 1 x i8> @llvm.masked.load.nxv1i8(<vscale x 1 x i8>*, i32, <vscale x 1 x i1>, <vscale x 1 x i8>)
+declare <vscale x 1 x i8> @llvm.masked.load.nxv1i8(ptr, i32, <vscale x 1 x i1>, <vscale x 1 x i8>)
 
-define <vscale x 1 x i16> @masked_load_nxv1i16(<vscale x 1 x i16>* %a, <vscale x 1 x i1> %mask) nounwind {
+define <vscale x 1 x i16> @masked_load_nxv1i16(ptr %a, <vscale x 1 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv1i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 1 x i16> @llvm.masked.load.nxv1i16(<vscale x 1 x i16>* %a, i32 2, <vscale x 1 x i1> %mask, <vscale x 1 x i16> undef)
+  %load = call <vscale x 1 x i16> @llvm.masked.load.nxv1i16(ptr %a, i32 2, <vscale x 1 x i1> %mask, <vscale x 1 x i16> undef)
   ret <vscale x 1 x i16> %load
 }
-declare <vscale x 1 x i16> @llvm.masked.load.nxv1i16(<vscale x 1 x i16>*, i32, <vscale x 1 x i1>, <vscale x 1 x i16>)
+declare <vscale x 1 x i16> @llvm.masked.load.nxv1i16(ptr, i32, <vscale x 1 x i1>, <vscale x 1 x i16>)
 
-define <vscale x 1 x i32> @masked_load_nxv1i32(<vscale x 1 x i32>* %a, <vscale x 1 x i1> %mask) nounwind {
+define <vscale x 1 x i32> @masked_load_nxv1i32(ptr %a, <vscale x 1 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv1i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 1 x i32> @llvm.masked.load.nxv1i32(<vscale x 1 x i32>* %a, i32 4, <vscale x 1 x i1> %mask, <vscale x 1 x i32> undef)
+  %load = call <vscale x 1 x i32> @llvm.masked.load.nxv1i32(ptr %a, i32 4, <vscale x 1 x i1> %mask, <vscale x 1 x i32> undef)
   ret <vscale x 1 x i32> %load
 }
-declare <vscale x 1 x i32> @llvm.masked.load.nxv1i32(<vscale x 1 x i32>*, i32, <vscale x 1 x i1>, <vscale x 1 x i32>)
+declare <vscale x 1 x i32> @llvm.masked.load.nxv1i32(ptr, i32, <vscale x 1 x i1>, <vscale x 1 x i32>)
 
-define <vscale x 1 x i64> @masked_load_nxv1i64(<vscale x 1 x i64>* %a, <vscale x 1 x i1> %mask) nounwind {
+define <vscale x 1 x i64> @masked_load_nxv1i64(ptr %a, <vscale x 1 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv1i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 1 x i64> @llvm.masked.load.nxv1i64(<vscale x 1 x i64>* %a, i32 8, <vscale x 1 x i1> %mask, <vscale x 1 x i64> undef)
+  %load = call <vscale x 1 x i64> @llvm.masked.load.nxv1i64(ptr %a, i32 8, <vscale x 1 x i1> %mask, <vscale x 1 x i64> undef)
   ret <vscale x 1 x i64> %load
 }
-declare <vscale x 1 x i64> @llvm.masked.load.nxv1i64(<vscale x 1 x i64>*, i32, <vscale x 1 x i1>, <vscale x 1 x i64>)
+declare <vscale x 1 x i64> @llvm.masked.load.nxv1i64(ptr, i32, <vscale x 1 x i1>, <vscale x 1 x i64>)
 
-define <vscale x 2 x i8> @masked_load_nxv2i8(<vscale x 2 x i8>* %a, <vscale x 2 x i1> %mask) nounwind {
+define <vscale x 2 x i8> @masked_load_nxv2i8(ptr %a, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vle8.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(<vscale x 2 x i8>* %a, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
+  %load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(ptr %a, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
   ret <vscale x 2 x i8> %load
 }
-declare <vscale x 2 x i8> @llvm.masked.load.nxv2i8(<vscale x 2 x i8>*, i32, <vscale x 2 x i1>, <vscale x 2 x i8>)
+declare <vscale x 2 x i8> @llvm.masked.load.nxv2i8(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x i8>)
 
-define <vscale x 2 x i16> @masked_load_nxv2i16(<vscale x 2 x i16>* %a, <vscale x 2 x i1> %mask) nounwind {
+define <vscale x 2 x i16> @masked_load_nxv2i16(ptr %a, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv2i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(<vscale x 2 x i16>* %a, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+  %load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(ptr %a, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
   ret <vscale x 2 x i16> %load
 }
-declare <vscale x 2 x i16> @llvm.masked.load.nxv2i16(<vscale x 2 x i16>*, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
+declare <vscale x 2 x i16> @llvm.masked.load.nxv2i16(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
 
-define <vscale x 2 x i32> @masked_load_nxv2i32(<vscale x 2 x i32>* %a, <vscale x 2 x i1> %mask) nounwind {
+define <vscale x 2 x i32> @masked_load_nxv2i32(ptr %a, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv2i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(<vscale x 2 x i32>* %a, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
+  %load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(ptr %a, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
   ret <vscale x 2 x i32> %load
 }
-declare <vscale x 2 x i32> @llvm.masked.load.nxv2i32(<vscale x 2 x i32>*, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
+declare <vscale x 2 x i32> @llvm.masked.load.nxv2i32(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
 
-define <vscale x 2 x i64> @masked_load_nxv2i64(<vscale x 2 x i64>* %a, <vscale x 2 x i1> %mask) nounwind {
+define <vscale x 2 x i64> @masked_load_nxv2i64(ptr %a, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv2i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64(<vscale x 2 x i64>* %a, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
+  %load = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64(ptr %a, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
   ret <vscale x 2 x i64> %load
 }
-declare <vscale x 2 x i64> @llvm.masked.load.nxv2i64(<vscale x 2 x i64>*, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
+declare <vscale x 2 x i64> @llvm.masked.load.nxv2i64(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
 
-define <vscale x 4 x i8> @masked_load_nxv4i8(<vscale x 4 x i8>* %a, <vscale x 4 x i1> %mask) nounwind {
+define <vscale x 4 x i8> @masked_load_nxv4i8(ptr %a, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv4i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vle8.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(<vscale x 4 x i8>* %a, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i8> undef)
+  %load = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(ptr %a, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i8> undef)
   ret <vscale x 4 x i8> %load
 }
-declare <vscale x 4 x i8> @llvm.masked.load.nxv4i8(<vscale x 4 x i8>*, i32, <vscale x 4 x i1>, <vscale x 4 x i8>)
+declare <vscale x 4 x i8> @llvm.masked.load.nxv4i8(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x i8>)
 
-define <vscale x 4 x i16> @masked_load_nxv4i16(<vscale x 4 x i16>* %a, <vscale x 4 x i1> %mask) nounwind {
+define <vscale x 4 x i16> @masked_load_nxv4i16(ptr %a, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv4i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(<vscale x 4 x i16>* %a, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef)
+  %load = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(ptr %a, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef)
   ret <vscale x 4 x i16> %load
 }
-declare <vscale x 4 x i16> @llvm.masked.load.nxv4i16(<vscale x 4 x i16>*, i32, <vscale x 4 x i1>, <vscale x 4 x i16>)
+declare <vscale x 4 x i16> @llvm.masked.load.nxv4i16(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x i16>)
 
-define <vscale x 4 x i32> @masked_load_nxv4i32(<vscale x 4 x i32>* %a, <vscale x 4 x i1> %mask) nounwind {
+define <vscale x 4 x i32> @masked_load_nxv4i32(ptr %a, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32(<vscale x 4 x i32>* %a, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
+  %load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32(ptr %a, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
   ret <vscale x 4 x i32> %load
 }
-declare <vscale x 4 x i32> @llvm.masked.load.nxv4i32(<vscale x 4 x i32>*, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
+declare <vscale x 4 x i32> @llvm.masked.load.nxv4i32(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
 
-define <vscale x 4 x i64> @masked_load_nxv4i64(<vscale x 4 x i64>* %a, <vscale x 4 x i1> %mask) nounwind {
+define <vscale x 4 x i64> @masked_load_nxv4i64(ptr %a, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv4i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 4 x i64> @llvm.masked.load.nxv4i64(<vscale x 4 x i64>* %a, i32 8, <vscale x 4 x i1> %mask, <vscale x 4 x i64> undef)
+  %load = call <vscale x 4 x i64> @llvm.masked.load.nxv4i64(ptr %a, i32 8, <vscale x 4 x i1> %mask, <vscale x 4 x i64> undef)
   ret <vscale x 4 x i64> %load
 }
-declare <vscale x 4 x i64> @llvm.masked.load.nxv4i64(<vscale x 4 x i64>*, i32, <vscale x 4 x i1>, <vscale x 4 x i64>)
+declare <vscale x 4 x i64> @llvm.masked.load.nxv4i64(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x i64>)
 
-define <vscale x 8 x i8> @masked_load_nxv8i8(<vscale x 8 x i8>* %a, <vscale x 8 x i1> %mask) nounwind {
+define <vscale x 8 x i8> @masked_load_nxv8i8(ptr %a, <vscale x 8 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv8i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vle8.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8>* %a, i32 1, <vscale x 8 x i1> %mask, <vscale x 8 x i8> undef)
+  %load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr %a, i32 1, <vscale x 8 x i1> %mask, <vscale x 8 x i8> undef)
   ret <vscale x 8 x i8> %load
 }
-declare <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8>*, i32, <vscale x 8 x i1>, <vscale x 8 x i8>)
+declare <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x i8>)
 
-define <vscale x 8 x i16> @masked_load_nxv8i16(<vscale x 8 x i16>* %a, <vscale x 8 x i1> %mask) nounwind {
+define <vscale x 8 x i16> @masked_load_nxv8i16(ptr %a, <vscale x 8 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16(<vscale x 8 x i16>* %a, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> undef)
+  %load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16(ptr %a, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> undef)
   ret <vscale x 8 x i16> %load
 }
-declare <vscale x 8 x i16> @llvm.masked.load.nxv8i16(<vscale x 8 x i16>*, i32, <vscale x 8 x i1>, <vscale x 8 x i16>)
+declare <vscale x 8 x i16> @llvm.masked.load.nxv8i16(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x i16>)
 
-define <vscale x 8 x i32> @masked_load_nxv8i32(<vscale x 8 x i32>* %a, <vscale x 8 x i1> %mask) nounwind {
+define <vscale x 8 x i32> @masked_load_nxv8i32(ptr %a, <vscale x 8 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv8i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 8 x i32> @llvm.masked.load.nxv8i32(<vscale x 8 x i32>* %a, i32 4, <vscale x 8 x i1> %mask, <vscale x 8 x i32> undef)
+  %load = call <vscale x 8 x i32> @llvm.masked.load.nxv8i32(ptr %a, i32 4, <vscale x 8 x i1> %mask, <vscale x 8 x i32> undef)
   ret <vscale x 8 x i32> %load
 }
-declare <vscale x 8 x i32> @llvm.masked.load.nxv8i32(<vscale x 8 x i32>*, i32, <vscale x 8 x i1>, <vscale x 8 x i32>)
+declare <vscale x 8 x i32> @llvm.masked.load.nxv8i32(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x i32>)
 
-define <vscale x 8 x i64> @masked_load_nxv8i64(<vscale x 8 x i64>* %a, <vscale x 8 x i1> %mask) nounwind {
+define <vscale x 8 x i64> @masked_load_nxv8i64(ptr %a, <vscale x 8 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv8i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 8 x i64> @llvm.masked.load.nxv8i64(<vscale x 8 x i64>* %a, i32 8, <vscale x 8 x i1> %mask, <vscale x 8 x i64> undef)
+  %load = call <vscale x 8 x i64> @llvm.masked.load.nxv8i64(ptr %a, i32 8, <vscale x 8 x i1> %mask, <vscale x 8 x i64> undef)
   ret <vscale x 8 x i64> %load
 }
-declare <vscale x 8 x i64> @llvm.masked.load.nxv8i64(<vscale x 8 x i64>*, i32, <vscale x 8 x i1>, <vscale x 8 x i64>)
+declare <vscale x 8 x i64> @llvm.masked.load.nxv8i64(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x i64>)
 
-define <vscale x 16 x i8> @masked_load_nxv16i8(<vscale x 16 x i8>* %a, <vscale x 16 x i1> %mask) nounwind {
+define <vscale x 16 x i8> @masked_load_nxv16i8(ptr %a, <vscale x 16 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv16i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
 ; CHECK-NEXT:    vle8.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8(<vscale x 16 x i8>* %a, i32 1, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
+  %load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8(ptr %a, i32 1, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
   ret <vscale x 16 x i8> %load
 }
-declare <vscale x 16 x i8> @llvm.masked.load.nxv16i8(<vscale x 16 x i8>*, i32, <vscale x 16 x i1>, <vscale x 16 x i8>)
+declare <vscale x 16 x i8> @llvm.masked.load.nxv16i8(ptr, i32, <vscale x 16 x i1>, <vscale x 16 x i8>)
 
-define <vscale x 16 x i16> @masked_load_nxv16i16(<vscale x 16 x i16>* %a, <vscale x 16 x i1> %mask) nounwind {
+define <vscale x 16 x i16> @masked_load_nxv16i16(ptr %a, <vscale x 16 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv16i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 16 x i16> @llvm.masked.load.nxv16i16(<vscale x 16 x i16>* %a, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i16> undef)
+  %load = call <vscale x 16 x i16> @llvm.masked.load.nxv16i16(ptr %a, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i16> undef)
   ret <vscale x 16 x i16> %load
 }
-declare <vscale x 16 x i16> @llvm.masked.load.nxv16i16(<vscale x 16 x i16>*, i32, <vscale x 16 x i1>, <vscale x 16 x i16>)
+declare <vscale x 16 x i16> @llvm.masked.load.nxv16i16(ptr, i32, <vscale x 16 x i1>, <vscale x 16 x i16>)
 
-define <vscale x 16 x i32> @masked_load_nxv16i32(<vscale x 16 x i32>* %a, <vscale x 16 x i1> %mask) nounwind {
+define <vscale x 16 x i32> @masked_load_nxv16i32(ptr %a, <vscale x 16 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv16i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 16 x i32> @llvm.masked.load.nxv16i32(<vscale x 16 x i32>* %a, i32 4, <vscale x 16 x i1> %mask, <vscale x 16 x i32> undef)
+  %load = call <vscale x 16 x i32> @llvm.masked.load.nxv16i32(ptr %a, i32 4, <vscale x 16 x i1> %mask, <vscale x 16 x i32> undef)
   ret <vscale x 16 x i32> %load
 }
-declare <vscale x 16 x i32> @llvm.masked.load.nxv16i32(<vscale x 16 x i32>*, i32, <vscale x 16 x i1>, <vscale x 16 x i32>)
+declare <vscale x 16 x i32> @llvm.masked.load.nxv16i32(ptr, i32, <vscale x 16 x i1>, <vscale x 16 x i32>)
 
-define <vscale x 32 x i8> @masked_load_nxv32i8(<vscale x 32 x i8>* %a, <vscale x 32 x i1> %mask) nounwind {
+define <vscale x 32 x i8> @masked_load_nxv32i8(ptr %a, <vscale x 32 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv32i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
 ; CHECK-NEXT:    vle8.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 32 x i8> @llvm.masked.load.nxv32i8(<vscale x 32 x i8>* %a, i32 1, <vscale x 32 x i1> %mask, <vscale x 32 x i8> undef)
+  %load = call <vscale x 32 x i8> @llvm.masked.load.nxv32i8(ptr %a, i32 1, <vscale x 32 x i1> %mask, <vscale x 32 x i8> undef)
   ret <vscale x 32 x i8> %load
 }
-declare <vscale x 32 x i8> @llvm.masked.load.nxv32i8(<vscale x 32 x i8>*, i32, <vscale x 32 x i1>, <vscale x 32 x i8>)
+declare <vscale x 32 x i8> @llvm.masked.load.nxv32i8(ptr, i32, <vscale x 32 x i1>, <vscale x 32 x i8>)
 
-define <vscale x 32 x i16> @masked_load_nxv32i16(<vscale x 32 x i16>* %a, <vscale x 32 x i1> %mask) nounwind {
+define <vscale x 32 x i16> @masked_load_nxv32i16(ptr %a, <vscale x 32 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv32i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 32 x i16> @llvm.masked.load.nxv32i16(<vscale x 32 x i16>* %a, i32 2, <vscale x 32 x i1> %mask, <vscale x 32 x i16> undef)
+  %load = call <vscale x 32 x i16> @llvm.masked.load.nxv32i16(ptr %a, i32 2, <vscale x 32 x i1> %mask, <vscale x 32 x i16> undef)
   ret <vscale x 32 x i16> %load
 }
-declare <vscale x 32 x i16> @llvm.masked.load.nxv32i16(<vscale x 32 x i16>*, i32, <vscale x 32 x i1>, <vscale x 32 x i16>)
+declare <vscale x 32 x i16> @llvm.masked.load.nxv32i16(ptr, i32, <vscale x 32 x i1>, <vscale x 32 x i16>)
 
-define <vscale x 64 x i8> @masked_load_nxv64i8(<vscale x 64 x i8>* %a, <vscale x 64 x i1> %mask) nounwind {
+define <vscale x 64 x i8> @masked_load_nxv64i8(ptr %a, <vscale x 64 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv64i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
 ; CHECK-NEXT:    vle8.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 64 x i8> @llvm.masked.load.nxv64i8(<vscale x 64 x i8>* %a, i32 1, <vscale x 64 x i1> %mask, <vscale x 64 x i8> undef)
+  %load = call <vscale x 64 x i8> @llvm.masked.load.nxv64i8(ptr %a, i32 1, <vscale x 64 x i1> %mask, <vscale x 64 x i8> undef)
   ret <vscale x 64 x i8> %load
 }
-declare <vscale x 64 x i8> @llvm.masked.load.nxv64i8(<vscale x 64 x i8>*, i32, <vscale x 64 x i1>, <vscale x 64 x i8>)
+declare <vscale x 64 x i8> @llvm.masked.load.nxv64i8(ptr, i32, <vscale x 64 x i1>, <vscale x 64 x i8>)
 
-define <vscale x 2 x i8> @masked_load_zero_mask(<vscale x 2 x i8>* %a) nounwind {
+define <vscale x 2 x i8> @masked_load_zero_mask(ptr %a) nounwind {
 ; CHECK-LABEL: masked_load_zero_mask:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(<vscale x 2 x i8>* %a, i32 1, <vscale x 2 x i1> zeroinitializer, <vscale x 2 x i8> undef)
+  %load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(ptr %a, i32 1, <vscale x 2 x i1> zeroinitializer, <vscale x 2 x i8> undef)
   ret <vscale x 2 x i8> %load
 }
 
-define <vscale x 2 x i8> @masked_load_allones_mask(<vscale x 2 x i8>* %a, <vscale x 2 x i8> %maskedoff) nounwind {
+define <vscale x 2 x i8> @masked_load_allones_mask(ptr %a, <vscale x 2 x i8> %maskedoff) nounwind {
 ; CHECK-LABEL: masked_load_allones_mask:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
@@ -260,6 +260,6 @@ define <vscale x 2 x i8> @masked_load_allones_mask(<vscale x 2 x i8>* %a, <vscal
 ; CHECK-NEXT:    ret
   %insert = insertelement <vscale x 2 x i1> poison, i1 1, i32 0
   %mask = shufflevector <vscale x 2 x i1> %insert, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(<vscale x 2 x i8>* %a, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> %maskedoff)
+  %load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(ptr %a, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> %maskedoff)
   ret <vscale x 2 x i8> %load
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-store-fp.ll b/llvm/test/CodeGen/RISCV/rvv/masked-store-fp.ll
index e140e1183fd52..17193aef1dff9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/masked-store-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-store-fp.ll
@@ -2,167 +2,167 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s
 
-define void @masked_store_nxv1f16(<vscale x 1 x half> %val, <vscale x 1 x half>* %a, <vscale x 1 x i1> %mask) nounwind {
+define void @masked_store_nxv1f16(<vscale x 1 x half> %val, ptr %a, <vscale x 1 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_store_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.nxv1f16.p0(<vscale x 1 x half> %val, <vscale x 1 x half>* %a, i32 2, <vscale x 1 x i1> %mask)
+  call void @llvm.masked.store.nxv1f16.p0(<vscale x 1 x half> %val, ptr %a, i32 2, <vscale x 1 x i1> %mask)
   ret void
 }
-declare void @llvm.masked.store.nxv1f16.p0(<vscale x 1 x half>, <vscale x 1 x half>*, i32, <vscale x 1 x i1>)
+declare void @llvm.masked.store.nxv1f16.p0(<vscale x 1 x half>, ptr, i32, <vscale x 1 x i1>)
 
-define void @masked_store_nxv1f32(<vscale x 1 x float> %val, <vscale x 1 x float>* %a, <vscale x 1 x i1> %mask) nounwind {
+define void @masked_store_nxv1f32(<vscale x 1 x float> %val, ptr %a, <vscale x 1 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_store_nxv1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.nxv1f32.p0(<vscale x 1 x float> %val, <vscale x 1 x float>* %a, i32 4, <vscale x 1 x i1> %mask)
+  call void @llvm.masked.store.nxv1f32.p0(<vscale x 1 x float> %val, ptr %a, i32 4, <vscale x 1 x i1> %mask)
   ret void
 }
-declare void @llvm.masked.store.nxv1f32.p0(<vscale x 1 x float>, <vscale x 1 x float>*, i32, <vscale x 1 x i1>)
+declare void @llvm.masked.store.nxv1f32.p0(<vscale x 1 x float>, ptr, i32, <vscale x 1 x i1>)
 
-define void @masked_store_nxv1f64(<vscale x 1 x double> %val, <vscale x 1 x double>* %a, <vscale x 1 x i1> %mask) nounwind {
+define void @masked_store_nxv1f64(<vscale x 1 x double> %val, ptr %a, <vscale x 1 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_store_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.nxv1f64.p0(<vscale x 1 x double> %val, <vscale x 1 x double>* %a, i32 8, <vscale x 1 x i1> %mask)
+  call void @llvm.masked.store.nxv1f64.p0(<vscale x 1 x double> %val, ptr %a, i32 8, <vscale x 1 x i1> %mask)
   ret void
 }
-declare void @llvm.masked.store.nxv1f64.p0(<vscale x 1 x double>, <vscale x 1 x double>*, i32, <vscale x 1 x i1>)
+declare void @llvm.masked.store.nxv1f64.p0(<vscale x 1 x double>, ptr, i32, <vscale x 1 x i1>)
 
-define void @masked_store_nxv2f16(<vscale x 2 x half> %val, <vscale x 2 x half>* %a, <vscale x 2 x i1> %mask) nounwind {
+define void @masked_store_nxv2f16(<vscale x 2 x half> %val, ptr %a, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_store_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.nxv2f16.p0(<vscale x 2 x half> %val, <vscale x 2 x half>* %a, i32 2, <vscale x 2 x i1> %mask)
+  call void @llvm.masked.store.nxv2f16.p0(<vscale x 2 x half> %val, ptr %a, i32 2, <vscale x 2 x i1> %mask)
   ret void
 }
-declare void @llvm.masked.store.nxv2f16.p0(<vscale x 2 x half>, <vscale x 2 x half>*, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.store.nxv2f16.p0(<vscale x 2 x half>, ptr, i32, <vscale x 2 x i1>)
 
-define void @masked_store_nxv2f32(<vscale x 2 x float> %val, <vscale x 2 x float>* %a, <vscale x 2 x i1> %mask) nounwind {
+define void @masked_store_nxv2f32(<vscale x 2 x float> %val, ptr %a, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_store_nxv2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.nxv2f32.p0(<vscale x 2 x float> %val, <vscale x 2 x float>* %a, i32 4, <vscale x 2 x i1> %mask)
+  call void @llvm.masked.store.nxv2f32.p0(<vscale x 2 x float> %val, ptr %a, i32 4, <vscale x 2 x i1> %mask)
   ret void
 }
-declare void @llvm.masked.store.nxv2f32.p0(<vscale x 2 x float>, <vscale x 2 x float>*, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.store.nxv2f32.p0(<vscale x 2 x float>, ptr, i32, <vscale x 2 x i1>)
 
-define void @masked_store_nxv2f64(<vscale x 2 x double> %val, <vscale x 2 x double>* %a, <vscale x 2 x i1> %mask) nounwind {
+define void @masked_store_nxv2f64(<vscale x 2 x double> %val, ptr %a, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_store_nxv2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double> %val, <vscale x 2 x double>* %a, i32 8, <vscale x 2 x i1> %mask)
+  call void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double> %val, ptr %a, i32 8, <vscale x 2 x i1> %mask)
   ret void
 }
-declare void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double>, <vscale x 2 x double>*, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double>, ptr, i32, <vscale x 2 x i1>)
 
-define void @masked_store_nxv4f16(<vscale x 4 x half> %val, <vscale x 4 x half>* %a, <vscale x 4 x i1> %mask) nounwind {
+define void @masked_store_nxv4f16(<vscale x 4 x half> %val, ptr %a, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_store_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.nxv4f16.p0(<vscale x 4 x half> %val, <vscale x 4 x half>* %a, i32 2, <vscale x 4 x i1> %mask)
+  call void @llvm.masked.store.nxv4f16.p0(<vscale x 4 x half> %val, ptr %a, i32 2, <vscale x 4 x i1> %mask)
   ret void
 }
-declare void @llvm.masked.store.nxv4f16.p0(<vscale x 4 x half>, <vscale x 4 x half>*, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.store.nxv4f16.p0(<vscale x 4 x half>, ptr, i32, <vscale x 4 x i1>)
 
-define void @masked_store_nxv4f32(<vscale x 4 x float> %val, <vscale x 4 x float>* %a, <vscale x 4 x i1> %mask) nounwind {
+define void @masked_store_nxv4f32(<vscale x 4 x float> %val, ptr %a, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_store_nxv4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> %val, <vscale x 4 x float>* %a, i32 4, <vscale x 4 x i1> %mask)
+  call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> %val, ptr %a, i32 4, <vscale x 4 x i1> %mask)
   ret void
 }
-declare void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float>, <vscale x 4 x float>*, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float>, ptr, i32, <vscale x 4 x i1>)
 
-define void @masked_store_nxv4f64(<vscale x 4 x double> %val, <vscale x 4 x double>* %a, <vscale x 4 x i1> %mask) nounwind {
+define void @masked_store_nxv4f64(<vscale x 4 x double> %val, ptr %a, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_store_nxv4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.nxv4f64.p0(<vscale x 4 x double> %val, <vscale x 4 x double>* %a, i32 8, <vscale x 4 x i1> %mask)
+  call void @llvm.masked.store.nxv4f64.p0(<vscale x 4 x double> %val, ptr %a, i32 8, <vscale x 4 x i1> %mask)
   ret void
 }
-declare void @llvm.masked.store.nxv4f64.p0(<vscale x 4 x double>, <vscale x 4 x double>*, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.store.nxv4f64.p0(<vscale x 4 x double>, ptr, i32, <vscale x 4 x i1>)
 
-define void @masked_store_nxv8f16(<vscale x 8 x half> %val, <vscale x 8 x half>* %a, <vscale x 8 x i1> %mask) nounwind {
+define void @masked_store_nxv8f16(<vscale x 8 x half> %val, ptr %a, <vscale x 8 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_store_nxv8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.nxv8f16.p0(<vscale x 8 x half> %val, <vscale x 8 x half>* %a, i32 2, <vscale x 8 x i1> %mask)
+  call void @llvm.masked.store.nxv8f16.p0(<vscale x 8 x half> %val, ptr %a, i32 2, <vscale x 8 x i1> %mask)
   ret void
 }
-declare void @llvm.masked.store.nxv8f16.p0(<vscale x 8 x half>, <vscale x 8 x half>*, i32, <vscale x 8 x i1>)
+declare void @llvm.masked.store.nxv8f16.p0(<vscale x 8 x half>, ptr, i32, <vscale x 8 x i1>)
 
-define void @masked_store_nxv8f32(<vscale x 8 x float> %val, <vscale x 8 x float>* %a, <vscale x 8 x i1> %mask) nounwind {
+define void @masked_store_nxv8f32(<vscale x 8 x float> %val, ptr %a, <vscale x 8 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_store_nxv8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.nxv8f32.p0(<vscale x 8 x float> %val, <vscale x 8 x float>* %a, i32 4, <vscale x 8 x i1> %mask)
+  call void @llvm.masked.store.nxv8f32.p0(<vscale x 8 x float> %val, ptr %a, i32 4, <vscale x 8 x i1> %mask)
   ret void
 }
-declare void @llvm.masked.store.nxv8f32.p0(<vscale x 8 x float>, <vscale x 8 x float>*, i32, <vscale x 8 x i1>)
+declare void @llvm.masked.store.nxv8f32.p0(<vscale x 8 x float>, ptr, i32, <vscale x 8 x i1>)
 
-define void @masked_store_nxv8f64(<vscale x 8 x double> %val, <vscale x 8 x double>* %a, <vscale x 8 x i1> %mask) nounwind {
+define void @masked_store_nxv8f64(<vscale x 8 x double> %val, ptr %a, <vscale x 8 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_store_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.nxv8f64.p0(<vscale x 8 x double> %val, <vscale x 8 x double>* %a, i32 8, <vscale x 8 x i1> %mask)
+  call void @llvm.masked.store.nxv8f64.p0(<vscale x 8 x double> %val, ptr %a, i32 8, <vscale x 8 x i1> %mask)
   ret void
 }
-declare void @llvm.masked.store.nxv8f64.p0(<vscale x 8 x double>, <vscale x 8 x double>*, i32, <vscale x 8 x i1>)
+declare void @llvm.masked.store.nxv8f64.p0(<vscale x 8 x double>, ptr, i32, <vscale x 8 x i1>)
 
-define void @masked_store_nxv16f16(<vscale x 16 x half> %val, <vscale x 16 x half>* %a, <vscale x 16 x i1> %mask) nounwind {
+define void @masked_store_nxv16f16(<vscale x 16 x half> %val, ptr %a, <vscale x 16 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_store_nxv16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.nxv16f16.p0(<vscale x 16 x half> %val, <vscale x 16 x half>* %a, i32 2, <vscale x 16 x i1> %mask)
+  call void @llvm.masked.store.nxv16f16.p0(<vscale x 16 x half> %val, ptr %a, i32 2, <vscale x 16 x i1> %mask)
   ret void
 }
-declare void @llvm.masked.store.nxv16f16.p0(<vscale x 16 x half>, <vscale x 16 x half>*, i32, <vscale x 16 x i1>)
+declare void @llvm.masked.store.nxv16f16.p0(<vscale x 16 x half>, ptr, i32, <vscale x 16 x i1>)
 
-define void @masked_store_nxv16f32(<vscale x 16 x float> %val, <vscale x 16 x float>* %a, <vscale x 16 x i1> %mask) nounwind {
+define void @masked_store_nxv16f32(<vscale x 16 x float> %val, ptr %a, <vscale x 16 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_store_nxv16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.nxv16f32.p0(<vscale x 16 x float> %val, <vscale x 16 x float>* %a, i32 4, <vscale x 16 x i1> %mask)
+  call void @llvm.masked.store.nxv16f32.p0(<vscale x 16 x float> %val, ptr %a, i32 4, <vscale x 16 x i1> %mask)
   ret void
 }
-declare void @llvm.masked.store.nxv16f32.p0(<vscale x 16 x float>, <vscale x 16 x float>*, i32, <vscale x 16 x i1>)
+declare void @llvm.masked.store.nxv16f32.p0(<vscale x 16 x float>, ptr, i32, <vscale x 16 x i1>)
 
-define void @masked_store_nxv32f16(<vscale x 32 x half> %val, <vscale x 32 x half>* %a, <vscale x 32 x i1> %mask) nounwind {
+define void @masked_store_nxv32f16(<vscale x 32 x half> %val, ptr %a, <vscale x 32 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_store_nxv32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.nxv32f16.p0(<vscale x 32 x half> %val, <vscale x 32 x half>* %a, i32 2, <vscale x 32 x i1> %mask)
+  call void @llvm.masked.store.nxv32f16.p0(<vscale x 32 x half> %val, ptr %a, i32 2, <vscale x 32 x i1> %mask)
   ret void
 }
-declare void @llvm.masked.store.nxv32f16.p0(<vscale x 32 x half>, <vscale x 32 x half>*, i32, <vscale x 32 x i1>)
+declare void @llvm.masked.store.nxv32f16.p0(<vscale x 32 x half>, ptr, i32, <vscale x 32 x i1>)

diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-store-int.ll b/llvm/test/CodeGen/RISCV/rvv/masked-store-int.ll
index 4c47cb0755751..a2fec5ab0798a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/masked-store-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-store-int.ll
@@ -2,257 +2,257 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
 
-define void @masked_store_nxv1i8(<vscale x 1 x i8> %val, <vscale x 1 x i8>* %a, <vscale x 1 x i1> %mask) nounwind {
+define void @masked_store_nxv1i8(<vscale x 1 x i8> %val, ptr %a, <vscale x 1 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_store_nxv1i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vse8.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.v1i8.p0(<vscale x 1 x i8> %val, <vscale x 1 x i8>* %a, i32 1, <vscale x 1 x i1> %mask)
+  call void @llvm.masked.store.v1i8.p0(<vscale x 1 x i8> %val, ptr %a, i32 1, <vscale x 1 x i1> %mask)
   ret void
 }
-declare void @llvm.masked.store.v1i8.p0(<vscale x 1 x i8>, <vscale x 1 x i8>*, i32, <vscale x 1 x i1>)
+declare void @llvm.masked.store.v1i8.p0(<vscale x 1 x i8>, ptr, i32, <vscale x 1 x i1>)
 
-define void @masked_store_nxv1i16(<vscale x 1 x i16> %val, <vscale x 1 x i16>* %a, <vscale x 1 x i1> %mask) nounwind {
+define void @masked_store_nxv1i16(<vscale x 1 x i16> %val, ptr %a, <vscale x 1 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_store_nxv1i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.v1i16.p0(<vscale x 1 x i16> %val, <vscale x 1 x i16>* %a, i32 2, <vscale x 1 x i1> %mask)
+  call void @llvm.masked.store.v1i16.p0(<vscale x 1 x i16> %val, ptr %a, i32 2, <vscale x 1 x i1> %mask)
   ret void
 }
-declare void @llvm.masked.store.v1i16.p0(<vscale x 1 x i16>, <vscale x 1 x i16>*, i32, <vscale x 1 x i1>)
+declare void @llvm.masked.store.v1i16.p0(<vscale x 1 x i16>, ptr, i32, <vscale x 1 x i1>)
 
-define void @masked_store_nxv1i32(<vscale x 1 x i32> %val, <vscale x 1 x i32>* %a, <vscale x 1 x i1> %mask) nounwind {
+define void @masked_store_nxv1i32(<vscale x 1 x i32> %val, ptr %a, <vscale x 1 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_store_nxv1i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.v1i32.p0(<vscale x 1 x i32> %val, <vscale x 1 x i32>* %a, i32 4, <vscale x 1 x i1> %mask)
+  call void @llvm.masked.store.v1i32.p0(<vscale x 1 x i32> %val, ptr %a, i32 4, <vscale x 1 x i1> %mask)
   ret void
 }
-declare void @llvm.masked.store.v1i32.p0(<vscale x 1 x i32>, <vscale x 1 x i32>*, i32, <vscale x 1 x i1>)
+declare void @llvm.masked.store.v1i32.p0(<vscale x 1 x i32>, ptr, i32, <vscale x 1 x i1>)
 
-define void @masked_store_nxv1i64(<vscale x 1 x i64> %val, <vscale x 1 x i64>* %a, <vscale x 1 x i1> %mask) nounwind {
+define void @masked_store_nxv1i64(<vscale x 1 x i64> %val, ptr %a, <vscale x 1 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_store_nxv1i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.v1i64.p0(<vscale x 1 x i64> %val, <vscale x 1 x i64>* %a, i32 8, <vscale x 1 x i1> %mask)
+  call void @llvm.masked.store.v1i64.p0(<vscale x 1 x i64> %val, ptr %a, i32 8, <vscale x 1 x i1> %mask)
   ret void
 }
-declare void @llvm.masked.store.v1i64.p0(<vscale x 1 x i64>, <vscale x 1 x i64>*, i32, <vscale x 1 x i1>)
+declare void @llvm.masked.store.v1i64.p0(<vscale x 1 x i64>, ptr, i32, <vscale x 1 x i1>)
 
-define void @masked_store_nxv2i8(<vscale x 2 x i8> %val, <vscale x 2 x i8>* %a, <vscale x 2 x i1> %mask) nounwind {
+define void @masked_store_nxv2i8(<vscale x 2 x i8> %val, ptr %a, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_store_nxv2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vse8.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.v2i8.p0(<vscale x 2 x i8> %val, <vscale x 2 x i8>* %a, i32 1, <vscale x 2 x i1> %mask)
+  call void @llvm.masked.store.v2i8.p0(<vscale x 2 x i8> %val, ptr %a, i32 1, <vscale x 2 x i1> %mask)
   ret void
 }
-declare void @llvm.masked.store.v2i8.p0(<vscale x 2 x i8>, <vscale x 2 x i8>*, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.store.v2i8.p0(<vscale x 2 x i8>, ptr, i32, <vscale x 2 x i1>)
 
-define void @masked_store_nxv2i16(<vscale x 2 x i16> %val, <vscale x 2 x i16>* %a, <vscale x 2 x i1> %mask) nounwind {
+define void @masked_store_nxv2i16(<vscale x 2 x i16> %val, ptr %a, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_store_nxv2i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.v2i16.p0(<vscale x 2 x i16> %val, <vscale x 2 x i16>* %a, i32 2, <vscale x 2 x i1> %mask)
+  call void @llvm.masked.store.v2i16.p0(<vscale x 2 x i16> %val, ptr %a, i32 2, <vscale x 2 x i1> %mask)
   ret void
 }
-declare void @llvm.masked.store.v2i16.p0(<vscale x 2 x i16>, <vscale x 2 x i16>*, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.store.v2i16.p0(<vscale x 2 x i16>, ptr, i32, <vscale x 2 x i1>)
 
-define void @masked_store_nxv2i32(<vscale x 2 x i32> %val, <vscale x 2 x i32>* %a, <vscale x 2 x i1> %mask) nounwind {
+define void @masked_store_nxv2i32(<vscale x 2 x i32> %val, ptr %a, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_store_nxv2i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.v2i32.p0(<vscale x 2 x i32> %val, <vscale x 2 x i32>* %a, i32 4, <vscale x 2 x i1> %mask)
+  call void @llvm.masked.store.v2i32.p0(<vscale x 2 x i32> %val, ptr %a, i32 4, <vscale x 2 x i1> %mask)
   ret void
 }
-declare void @llvm.masked.store.v2i32.p0(<vscale x 2 x i32>, <vscale x 2 x i32>*, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.store.v2i32.p0(<vscale x 2 x i32>, ptr, i32, <vscale x 2 x i1>)
 
-define void @masked_store_nxv2i64(<vscale x 2 x i64> %val, <vscale x 2 x i64>* %a, <vscale x 2 x i1> %mask) nounwind {
+define void @masked_store_nxv2i64(<vscale x 2 x i64> %val, ptr %a, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_store_nxv2i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.v2i64.p0(<vscale x 2 x i64> %val, <vscale x 2 x i64>* %a, i32 8, <vscale x 2 x i1> %mask)
+  call void @llvm.masked.store.v2i64.p0(<vscale x 2 x i64> %val, ptr %a, i32 8, <vscale x 2 x i1> %mask)
   ret void
 }
-declare void @llvm.masked.store.v2i64.p0(<vscale x 2 x i64>, <vscale x 2 x i64>*, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.store.v2i64.p0(<vscale x 2 x i64>, ptr, i32, <vscale x 2 x i1>)
 
-define void @masked_store_nxv4i8(<vscale x 4 x i8> %val, <vscale x 4 x i8>* %a, <vscale x 4 x i1> %mask) nounwind {
+define void @masked_store_nxv4i8(<vscale x 4 x i8> %val, ptr %a, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_store_nxv4i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vse8.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.v4i8.p0(<vscale x 4 x i8> %val, <vscale x 4 x i8>* %a, i32 1, <vscale x 4 x i1> %mask)
+  call void @llvm.masked.store.v4i8.p0(<vscale x 4 x i8> %val, ptr %a, i32 1, <vscale x 4 x i1> %mask)
   ret void
 }
-declare void @llvm.masked.store.v4i8.p0(<vscale x 4 x i8>, <vscale x 4 x i8>*, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.store.v4i8.p0(<vscale x 4 x i8>, ptr, i32, <vscale x 4 x i1>)
 
-define void @masked_store_nxv4i16(<vscale x 4 x i16> %val, <vscale x 4 x i16>* %a, <vscale x 4 x i1> %mask) nounwind {
+define void @masked_store_nxv4i16(<vscale x 4 x i16> %val, ptr %a, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_store_nxv4i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.v4i16.p0(<vscale x 4 x i16> %val, <vscale x 4 x i16>* %a, i32 2, <vscale x 4 x i1> %mask)
+  call void @llvm.masked.store.v4i16.p0(<vscale x 4 x i16> %val, ptr %a, i32 2, <vscale x 4 x i1> %mask)
   ret void
 }
-declare void @llvm.masked.store.v4i16.p0(<vscale x 4 x i16>, <vscale x 4 x i16>*, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.store.v4i16.p0(<vscale x 4 x i16>, ptr, i32, <vscale x 4 x i1>)
 
-define void @masked_store_nxv4i32(<vscale x 4 x i32> %val, <vscale x 4 x i32>* %a, <vscale x 4 x i1> %mask) nounwind {
+define void @masked_store_nxv4i32(<vscale x 4 x i32> %val, ptr %a, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_store_nxv4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.v4i32.p0(<vscale x 4 x i32> %val, <vscale x 4 x i32>* %a, i32 4, <vscale x 4 x i1> %mask)
+  call void @llvm.masked.store.v4i32.p0(<vscale x 4 x i32> %val, ptr %a, i32 4, <vscale x 4 x i1> %mask)
   ret void
 }
-declare void @llvm.masked.store.v4i32.p0(<vscale x 4 x i32>, <vscale x 4 x i32>*, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.store.v4i32.p0(<vscale x 4 x i32>, ptr, i32, <vscale x 4 x i1>)
 
-define void @masked_store_nxv4i64(<vscale x 4 x i64> %val, <vscale x 4 x i64>* %a, <vscale x 4 x i1> %mask) nounwind {
+define void @masked_store_nxv4i64(<vscale x 4 x i64> %val, ptr %a, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_store_nxv4i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.v4i64.p0(<vscale x 4 x i64> %val, <vscale x 4 x i64>* %a, i32 8, <vscale x 4 x i1> %mask)
+  call void @llvm.masked.store.v4i64.p0(<vscale x 4 x i64> %val, ptr %a, i32 8, <vscale x 4 x i1> %mask)
   ret void
 }
-declare void @llvm.masked.store.v4i64.p0(<vscale x 4 x i64>, <vscale x 4 x i64>*, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.store.v4i64.p0(<vscale x 4 x i64>, ptr, i32, <vscale x 4 x i1>)
 
-define void @masked_store_nxv8i8(<vscale x 8 x i8> %val, <vscale x 8 x i8>* %a, <vscale x 8 x i1> %mask) nounwind {
+define void @masked_store_nxv8i8(<vscale x 8 x i8> %val, ptr %a, <vscale x 8 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_store_nxv8i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vse8.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.v8i8.p0(<vscale x 8 x i8> %val, <vscale x 8 x i8>* %a, i32 1, <vscale x 8 x i1> %mask)
+  call void @llvm.masked.store.v8i8.p0(<vscale x 8 x i8> %val, ptr %a, i32 1, <vscale x 8 x i1> %mask)
   ret void
 }
-declare void @llvm.masked.store.v8i8.p0(<vscale x 8 x i8>, <vscale x 8 x i8>*, i32, <vscale x 8 x i1>)
+declare void @llvm.masked.store.v8i8.p0(<vscale x 8 x i8>, ptr, i32, <vscale x 8 x i1>)
 
-define void @masked_store_nxv8i16(<vscale x 8 x i16> %val, <vscale x 8 x i16>* %a, <vscale x 8 x i1> %mask) nounwind {
+define void @masked_store_nxv8i16(<vscale x 8 x i16> %val, ptr %a, <vscale x 8 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_store_nxv8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.v8i16.p0(<vscale x 8 x i16> %val, <vscale x 8 x i16>* %a, i32 2, <vscale x 8 x i1> %mask)
+  call void @llvm.masked.store.v8i16.p0(<vscale x 8 x i16> %val, ptr %a, i32 2, <vscale x 8 x i1> %mask)
   ret void
 }
-declare void @llvm.masked.store.v8i16.p0(<vscale x 8 x i16>, <vscale x 8 x i16>*, i32, <vscale x 8 x i1>)
+declare void @llvm.masked.store.v8i16.p0(<vscale x 8 x i16>, ptr, i32, <vscale x 8 x i1>)
 
-define void @masked_store_nxv8i32(<vscale x 8 x i32> %val, <vscale x 8 x i32>* %a, <vscale x 8 x i1> %mask) nounwind {
+define void @masked_store_nxv8i32(<vscale x 8 x i32> %val, ptr %a, <vscale x 8 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_store_nxv8i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.v8i32.p0(<vscale x 8 x i32> %val, <vscale x 8 x i32>* %a, i32 4, <vscale x 8 x i1> %mask)
+  call void @llvm.masked.store.v8i32.p0(<vscale x 8 x i32> %val, ptr %a, i32 4, <vscale x 8 x i1> %mask)
   ret void
 }
-declare void @llvm.masked.store.v8i32.p0(<vscale x 8 x i32>, <vscale x 8 x i32>*, i32, <vscale x 8 x i1>)
+declare void @llvm.masked.store.v8i32.p0(<vscale x 8 x i32>, ptr, i32, <vscale x 8 x i1>)
 
-define void @masked_store_nxv8i64(<vscale x 8 x i64> %val, <vscale x 8 x i64>* %a, <vscale x 8 x i1> %mask) nounwind {
+define void @masked_store_nxv8i64(<vscale x 8 x i64> %val, ptr %a, <vscale x 8 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_store_nxv8i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.v8i64.p0(<vscale x 8 x i64> %val, <vscale x 8 x i64>* %a, i32 8, <vscale x 8 x i1> %mask)
+  call void @llvm.masked.store.v8i64.p0(<vscale x 8 x i64> %val, ptr %a, i32 8, <vscale x 8 x i1> %mask)
   ret void
 }
-declare void @llvm.masked.store.v8i64.p0(<vscale x 8 x i64>, <vscale x 8 x i64>*, i32, <vscale x 8 x i1>)
+declare void @llvm.masked.store.v8i64.p0(<vscale x 8 x i64>, ptr, i32, <vscale x 8 x i1>)
 
-define void @masked_store_nxv16i8(<vscale x 16 x i8> %val, <vscale x 16 x i8>* %a, <vscale x 16 x i1> %mask) nounwind {
+define void @masked_store_nxv16i8(<vscale x 16 x i8> %val, ptr %a, <vscale x 16 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_store_nxv16i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
 ; CHECK-NEXT:    vse8.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.v16i8.p0(<vscale x 16 x i8> %val, <vscale x 16 x i8>* %a, i32 1, <vscale x 16 x i1> %mask)
+  call void @llvm.masked.store.v16i8.p0(<vscale x 16 x i8> %val, ptr %a, i32 1, <vscale x 16 x i1> %mask)
   ret void
 }
-declare void @llvm.masked.store.v16i8.p0(<vscale x 16 x i8>, <vscale x 16 x i8>*, i32, <vscale x 16 x i1>)
+declare void @llvm.masked.store.v16i8.p0(<vscale x 16 x i8>, ptr, i32, <vscale x 16 x i1>)
 
-define void @masked_store_nxv16i16(<vscale x 16 x i16> %val, <vscale x 16 x i16>* %a, <vscale x 16 x i1> %mask) nounwind {
+define void @masked_store_nxv16i16(<vscale x 16 x i16> %val, ptr %a, <vscale x 16 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_store_nxv16i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.v16i16.p0(<vscale x 16 x i16> %val, <vscale x 16 x i16>* %a, i32 2, <vscale x 16 x i1> %mask)
+  call void @llvm.masked.store.v16i16.p0(<vscale x 16 x i16> %val, ptr %a, i32 2, <vscale x 16 x i1> %mask)
   ret void
 }
-declare void @llvm.masked.store.v16i16.p0(<vscale x 16 x i16>, <vscale x 16 x i16>*, i32, <vscale x 16 x i1>)
+declare void @llvm.masked.store.v16i16.p0(<vscale x 16 x i16>, ptr, i32, <vscale x 16 x i1>)
 
-define void @masked_store_nxv16i32(<vscale x 16 x i32> %val, <vscale x 16 x i32>* %a, <vscale x 16 x i1> %mask) nounwind {
+define void @masked_store_nxv16i32(<vscale x 16 x i32> %val, ptr %a, <vscale x 16 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_store_nxv16i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.v16i32.p0(<vscale x 16 x i32> %val, <vscale x 16 x i32>* %a, i32 4, <vscale x 16 x i1> %mask)
+  call void @llvm.masked.store.v16i32.p0(<vscale x 16 x i32> %val, ptr %a, i32 4, <vscale x 16 x i1> %mask)
   ret void
 }
-declare void @llvm.masked.store.v16i32.p0(<vscale x 16 x i32>, <vscale x 16 x i32>*, i32, <vscale x 16 x i1>)
+declare void @llvm.masked.store.v16i32.p0(<vscale x 16 x i32>, ptr, i32, <vscale x 16 x i1>)
 
-define void @masked_store_nxv32i8(<vscale x 32 x i8> %val, <vscale x 32 x i8>* %a, <vscale x 32 x i1> %mask) nounwind {
+define void @masked_store_nxv32i8(<vscale x 32 x i8> %val, ptr %a, <vscale x 32 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_store_nxv32i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
 ; CHECK-NEXT:    vse8.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.v32i8.p0(<vscale x 32 x i8> %val, <vscale x 32 x i8>* %a, i32 1, <vscale x 32 x i1> %mask)
+  call void @llvm.masked.store.v32i8.p0(<vscale x 32 x i8> %val, ptr %a, i32 1, <vscale x 32 x i1> %mask)
   ret void
 }
-declare void @llvm.masked.store.v32i8.p0(<vscale x 32 x i8>, <vscale x 32 x i8>*, i32, <vscale x 32 x i1>)
+declare void @llvm.masked.store.v32i8.p0(<vscale x 32 x i8>, ptr, i32, <vscale x 32 x i1>)
 
-define void @masked_store_nxv32i16(<vscale x 32 x i16> %val, <vscale x 32 x i16>* %a, <vscale x 32 x i1> %mask) nounwind {
+define void @masked_store_nxv32i16(<vscale x 32 x i16> %val, ptr %a, <vscale x 32 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_store_nxv32i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.v32i16.p0(<vscale x 32 x i16> %val, <vscale x 32 x i16>* %a, i32 2, <vscale x 32 x i1> %mask)
+  call void @llvm.masked.store.v32i16.p0(<vscale x 32 x i16> %val, ptr %a, i32 2, <vscale x 32 x i1> %mask)
   ret void
 }
-declare void @llvm.masked.store.v32i16.p0(<vscale x 32 x i16>, <vscale x 32 x i16>*, i32, <vscale x 32 x i1>)
+declare void @llvm.masked.store.v32i16.p0(<vscale x 32 x i16>, ptr, i32, <vscale x 32 x i1>)
 
-define void @masked_store_nxv64i8(<vscale x 64 x i8> %val, <vscale x 64 x i8>* %a, <vscale x 64 x i1> %mask) nounwind {
+define void @masked_store_nxv64i8(<vscale x 64 x i8> %val, ptr %a, <vscale x 64 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_store_nxv64i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
 ; CHECK-NEXT:    vse8.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.v64i8.p0(<vscale x 64 x i8> %val, <vscale x 64 x i8>* %a, i32 4, <vscale x 64 x i1> %mask)
+  call void @llvm.masked.store.v64i8.p0(<vscale x 64 x i8> %val, ptr %a, i32 4, <vscale x 64 x i1> %mask)
   ret void
 }
-declare void @llvm.masked.store.v64i8.p0(<vscale x 64 x i8>, <vscale x 64 x i8>*, i32, <vscale x 64 x i1>)
+declare void @llvm.masked.store.v64i8.p0(<vscale x 64 x i8>, ptr, i32, <vscale x 64 x i1>)
 
-define void @masked_store_zero_mask(<vscale x 2 x i8> %val, <vscale x 2 x i8>* %a) nounwind {
+define void @masked_store_zero_mask(<vscale x 2 x i8> %val, ptr %a) nounwind {
 ; CHECK-LABEL: masked_store_zero_mask:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.v2i8.p0(<vscale x 2 x i8> %val, <vscale x 2 x i8>* %a, i32 1, <vscale x 2 x i1> zeroinitializer)
+  call void @llvm.masked.store.v2i8.p0(<vscale x 2 x i8> %val, ptr %a, i32 1, <vscale x 2 x i1> zeroinitializer)
   ret void
 }
 
-define void @masked_store_allones_mask(<vscale x 2 x i8> %val, <vscale x 2 x i8>* %a) nounwind {
+define void @masked_store_allones_mask(<vscale x 2 x i8> %val, ptr %a) nounwind {
 ; CHECK-LABEL: masked_store_allones_mask:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
@@ -260,6 +260,6 @@ define void @masked_store_allones_mask(<vscale x 2 x i8> %val, <vscale x 2 x i8>
 ; CHECK-NEXT:    ret
   %insert = insertelement <vscale x 2 x i1> poison, i1 1, i32 0
   %mask = shufflevector <vscale x 2 x i1> %insert, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  call void @llvm.masked.store.v2i8.p0(<vscale x 2 x i8> %val, <vscale x 2 x i8>* %a, i32 1, <vscale x 2 x i1> %mask)
+  call void @llvm.masked.store.v2i8.p0(<vscale x 2 x i8> %val, ptr %a, i32 1, <vscale x 2 x i1> %mask)
   ret void
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll
index 3c9fe78a86671..d81079da64bd3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll
@@ -6,12 +6,12 @@
 
 declare <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vle_mask_nxv1i64_nxv1i64(<vscale x 1 x i64>* %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vle_mask_nxv1i64_nxv1i64(ptr %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -20,7 +20,7 @@ define <vscale x 1 x i64> @intrinsic_vle_mask_nxv1i64_nxv1i64(<vscale x 1 x i64>
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
     <vscale x 1 x i64> undef,
-    <vscale x 1 x i64>* %0,
+    ptr %0,
     <vscale x 1 x i1> %1,
     iXLen %2, iXLen 3)
 
@@ -29,12 +29,12 @@ entry:
 
 declare { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv1i64(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64>* %0, <vscale x 1 x i1> %1, iXLen %2, iXLen* %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(ptr %0, <vscale x 1 x i1> %1, iXLen %2, iXLen* %3) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -53,7 +53,7 @@ define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(<vscale x 1 x
 entry:
   %a = call { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv1i64(
     <vscale x 1 x i64> undef,
-    <vscale x 1 x i64>* %0,
+    ptr %0,
     <vscale x 1 x i1> %1,
     iXLen %2, iXLen 3)
   %b = extractvalue { <vscale x 1 x i64>, iXLen } %a, 0
@@ -65,13 +65,13 @@ entry:
 
 declare <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   iXLen,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vlse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64>* %0, iXLen %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vlse_mask_v_nxv1i64_nxv1i64(ptr %0, iXLen %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
@@ -80,7 +80,7 @@ define <vscale x 1 x i64> @intrinsic_vlse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64(
     <vscale x 1 x i64> undef,
-    <vscale x 1 x i64>* %0,
+    ptr %0,
     iXLen %1,
     <vscale x 1 x i1> %2,
     iXLen %3, iXLen 3)
@@ -90,17 +90,17 @@ entry:
 
 declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   <vscale x 1 x iXLen>,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1iXLen(<vscale x 1 x i8>* %0, <vscale x 1 x iXLen> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1iXLen(ptr %0, <vscale x 1 x iXLen> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen(
     <vscale x 1 x i8> undef,
-    <vscale x 1 x i8>* %0,
+    ptr %0,
     <vscale x 1 x iXLen> %1,
     <vscale x 1 x i1> %2,
     iXLen %3, iXLen 3)

diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll
index b11d86882ebc0..c8bff58b00e47 100644
--- a/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll
@@ -6,11 +6,11 @@
 
 declare <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   <vscale x 1 x i1>,
   iXLen,
   iXLen)
-define <vscale x 1 x i64> @intrinsic_vle_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vle_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -19,7 +19,7 @@ define <vscale x 1 x i64> @intrinsic_vle_mask_nxv1i64_nxv1i64(<vscale x 1 x i64>
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
     <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
+    ptr %1,
     <vscale x 1 x i1> %2,
     iXLen %3, iXLen 1)
 
@@ -28,11 +28,11 @@ entry:
 
 declare { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv1i64(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   <vscale x 1 x i1>,
   iXLen,
   iXLen)
-define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -51,7 +51,7 @@ define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(<vscale x 1 x
 entry:
   %a = call { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv1i64(
     <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
+    ptr %1,
     <vscale x 1 x i1> %2,
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 1 x i64>, iXLen } %a, 0
@@ -63,12 +63,12 @@ entry:
 
 declare <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   iXLen,
   <vscale x 1 x i1>,
   iXLen,
   iXLen)
-define <vscale x 1 x i64> @intrinsic_vlse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vlse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
@@ -77,7 +77,7 @@ define <vscale x 1 x i64> @intrinsic_vlse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64(
     <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -87,16 +87,16 @@ entry:
 
 declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   <vscale x 1 x iXLen>,
   <vscale x 1 x i1>,
   iXLen,
   iXLen)
-define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1iXLen(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x iXLen> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1iXLen(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x iXLen> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen(
     <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
+    ptr %1,
     <vscale x 1 x iXLen> %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)

diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll
index 8e5035e59262b..409a008ec7cf7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll
@@ -6,11 +6,11 @@
 
 declare <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   <vscale x 1 x i1>,
   iXLen,
   iXLen)
-define <vscale x 1 x i64> @intrinsic_vle_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vle_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
@@ -19,7 +19,7 @@ define <vscale x 1 x i64> @intrinsic_vle_mask_nxv1i64_nxv1i64(<vscale x 1 x i64>
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
     <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
+    ptr %1,
     <vscale x 1 x i1> %2,
     iXLen %3, iXLen 2)
 
@@ -28,11 +28,11 @@ entry:
 
 declare { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv1i64(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   <vscale x 1 x i1>,
   iXLen,
   iXLen)
-define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
@@ -51,7 +51,7 @@ define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(<vscale x 1 x
 entry:
   %a = call { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv1i64(
     <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
+    ptr %1,
     <vscale x 1 x i1> %2,
     iXLen %3, iXLen 2)
   %b = extractvalue { <vscale x 1 x i64>, iXLen } %a, 0
@@ -63,12 +63,12 @@ entry:
 
 declare <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   iXLen,
   <vscale x 1 x i1>,
   iXLen,
   iXLen)
-define <vscale x 1 x i64> @intrinsic_vlse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vlse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, tu, ma
@@ -77,7 +77,7 @@ define <vscale x 1 x i64> @intrinsic_vlse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64(
     <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 2)
@@ -87,16 +87,16 @@ entry:
 
 declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   <vscale x 1 x iXLen>,
   <vscale x 1 x i1>,
   iXLen,
   iXLen)
-define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1iXLen(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x iXLen> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1iXLen(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x iXLen> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen(
     <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
+    ptr %1,
     <vscale x 1 x iXLen> %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 2)

diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll
index cabb0db215fd2..90054bcc5f36e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll
@@ -6,11 +6,11 @@
 
 declare <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   <vscale x 1 x i1>,
   iXLen,
   iXLen)
-define <vscale x 1 x i64> @intrinsic_vle_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vle_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
@@ -19,7 +19,7 @@ define <vscale x 1 x i64> @intrinsic_vle_mask_nxv1i64_nxv1i64(<vscale x 1 x i64>
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
     <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
+    ptr %1,
     <vscale x 1 x i1> %2,
     iXLen %3, iXLen 0)
 
@@ -28,11 +28,11 @@ entry:
 
 declare { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv1i64(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   <vscale x 1 x i1>,
   iXLen,
   iXLen)
-define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
@@ -51,7 +51,7 @@ define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(<vscale x 1 x
 entry:
   %a = call { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv1i64(
     <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
+    ptr %1,
     <vscale x 1 x i1> %2,
     iXLen %3, iXLen 0)
   %b = extractvalue { <vscale x 1 x i64>, iXLen } %a, 0
@@ -63,12 +63,12 @@ entry:
 
 declare <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   iXLen,
   <vscale x 1 x i1>,
   iXLen,
   iXLen)
-define <vscale x 1 x i64> @intrinsic_vlse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vlse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, tu, mu
@@ -77,7 +77,7 @@ define <vscale x 1 x i64> @intrinsic_vlse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64(
     <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 0)
@@ -87,16 +87,16 @@ entry:
 
 declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   <vscale x 1 x iXLen>,
   <vscale x 1 x i1>,
   iXLen,
   iXLen)
-define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1iXLen(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x iXLen> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1iXLen(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x iXLen> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen(
     <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
+    ptr %1,
     <vscale x 1 x iXLen> %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 0)

diff --git a/llvm/test/CodeGen/RISCV/rvv/memory-args.ll b/llvm/test/CodeGen/RISCV/rvv/memory-args.ll
index bdfec92d2305e..87dd0048d928a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/memory-args.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/memory-args.ll
@@ -67,9 +67,9 @@ define <vscale x 64 x i8> @caller() {
   %local0 = alloca <vscale x 64 x i8>
   %local1 = alloca <vscale x 64 x i8>
   %local2 = alloca <vscale x 64 x i8>
-  %arg0 = load volatile <vscale x 64 x i8>, <vscale x 64 x i8>* %local0
-  %arg1 = load volatile <vscale x 64 x i8>, <vscale x 64 x i8>* %local1
-  %arg2 = load volatile <vscale x 64 x i8>, <vscale x 64 x i8>* %local2
+  %arg0 = load volatile <vscale x 64 x i8>, ptr %local0
+  %arg1 = load volatile <vscale x 64 x i8>, ptr %local1
+  %arg2 = load volatile <vscale x 64 x i8>, ptr %local2
   %ret = call <vscale x 64 x i8> @callee(<vscale x 64 x i8> %arg0,
                                          <vscale x 64 x i8> %arg1,
                                          <vscale x 64 x i8> %arg2)
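
For readers skimming the pattern above: the conversion only retypes the pointer operand; the value type of each load and store stays spelled out on the instruction itself, so no information is lost. A minimal standalone sketch of the post-conversion form, with a hypothetical function name and value names not taken from any test, assuming the opaque-pointer mode that is now the default:

define <vscale x 64 x i8> @sketch_scalable_load(ptr %slot) {
  ; The element type lives on the load, not on the pointer operand.
  %v = load volatile <vscale x 64 x i8>, ptr %slot
  ret <vscale x 64 x i8> %v
}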

diff --git a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
index 3a9d2556aff63..07dcddd9c6860 100644
--- a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
@@ -1204,7 +1204,7 @@ declare <vscale x 16 x i64> @llvm.masked.gather.nxv16i64.nxv16p0(<vscale x 16 x
 declare <vscale x 16 x i64> @llvm.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64>, <vscale x 8 x i64>, i64 %idx)
 declare <vscale x 16 x ptr> @llvm.vector.insert.nxv8p0.nxv16p0(<vscale x 16 x ptr>, <vscale x 8 x ptr>, i64 %idx)
 
-define void @mgather_nxv16i64(<vscale x 8 x ptr> %ptrs0, <vscale x 8 x ptr> %ptrs1, <vscale x 16 x i1> %m, <vscale x 8 x i64> %passthru0, <vscale x 8 x i64> %passthru1, <vscale x 16 x i64>* %out) {
+define void @mgather_nxv16i64(<vscale x 8 x ptr> %ptrs0, <vscale x 8 x ptr> %ptrs1, <vscale x 16 x i1> %m, <vscale x 8 x i64> %passthru0, <vscale x 8 x i64> %passthru1, ptr %out) {
 ; RV32-LABEL: mgather_nxv16i64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vl8re64.v v24, (a0)
@@ -1261,7 +1261,7 @@ define void @mgather_nxv16i64(<vscale x 8 x ptr> %ptrs0, <vscale x 8 x ptr> %ptr
   %pt1 = call <vscale x 16 x i64> @llvm.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> %pt0, <vscale x 8 x i64> %passthru1, i64 8)
 
   %v = call <vscale x 16 x i64> @llvm.masked.gather.nxv16i64.nxv16p0(<vscale x 16 x ptr> %p1, i32 8, <vscale x 16 x i1> %m, <vscale x 16 x i64> %pt1)
-  store <vscale x 16 x i64> %v, <vscale x 16 x i64>* %out
+  store <vscale x 16 x i64> %v, ptr %out
   ret void
 }
 

diff --git a/llvm/test/CodeGen/RISCV/rvv/narrow-shift-extend.ll b/llvm/test/CodeGen/RISCV/rvv/narrow-shift-extend.ll
index b966aea720a92..f608a63d6bb9b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/narrow-shift-extend.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/narrow-shift-extend.ll
@@ -3,11 +3,11 @@
 
 declare <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   <vscale x 4 x i64>,
   i64);
 
-define <vscale x 4 x i32> @test_vloxei(<vscale x 4 x i32>* %ptr, <vscale x 4 x i8> %offset, i64 %vl) {
+define <vscale x 4 x i32> @test_vloxei(ptr %ptr, <vscale x 4 x i8> %offset, i64 %vl) {
 ; CHECK-LABEL: test_vloxei:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e64, m4, ta, ma
@@ -23,13 +23,13 @@ entry:
   %shl = shl <vscale x 4 x i64> %offset.ext, %shamt.vec
   %res = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64(
     <vscale x 4 x i32> undef,
-    <vscale x 4 x i32>* %ptr,
+    ptr %ptr,
     <vscale x 4 x i64> %shl,
     i64 %vl)
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @test_vloxei2(<vscale x 4 x i32>* %ptr, <vscale x 4 x i8> %offset, i64 %vl) {
+define <vscale x 4 x i32> @test_vloxei2(ptr %ptr, <vscale x 4 x i8> %offset, i64 %vl) {
 ; CHECK-LABEL: test_vloxei2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e64, m4, ta, ma
@@ -45,13 +45,13 @@ entry:
   %shl = shl <vscale x 4 x i64> %offset.ext, %shamt.vec
   %res = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64(
     <vscale x 4 x i32> undef,
-    <vscale x 4 x i32>* %ptr,
+    ptr %ptr,
     <vscale x 4 x i64> %shl,
     i64 %vl)
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @test_vloxei3(<vscale x 4 x i32>* %ptr, <vscale x 4 x i8> %offset, i64 %vl) {
+define <vscale x 4 x i32> @test_vloxei3(ptr %ptr, <vscale x 4 x i8> %offset, i64 %vl) {
 ; CHECK-LABEL: test_vloxei3:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e64, m4, ta, ma
@@ -67,7 +67,7 @@ entry:
   %shl = shl <vscale x 4 x i64> %offset.ext, %shamt.vec
   %res = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64(
     <vscale x 4 x i32> undef,
-    <vscale x 4 x i32>* %ptr,
+    ptr %ptr,
     <vscale x 4 x i64> %shl,
     i64 %vl)
   ret <vscale x 4 x i32> %res
@@ -75,7 +75,7 @@ entry:
 
 ; Test using vp.zext to extend.
 declare <vscale x 4 x i64> @llvm.vp.zext.nxvi64.nxv1i8(<vscale x 4 x i8>, <vscale x 4 x i1>, i32)
-define <vscale x 4 x i32> @test_vloxei4(<vscale x 4 x i32>* %ptr, <vscale x 4 x i8> %offset, <vscale x 4 x i1> %m, i32 zeroext %vl) {
+define <vscale x 4 x i32> @test_vloxei4(ptr %ptr, <vscale x 4 x i8> %offset, <vscale x 4 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: test_vloxei4:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -93,7 +93,7 @@ entry:
   %vl.i64 = zext i32 %vl to i64
   %res = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64(
     <vscale x 4 x i32> undef,
-    <vscale x 4 x i32>* %ptr,
+    ptr %ptr,
     <vscale x 4 x i64> %shl,
     i64 %vl.i64)
   ret <vscale x 4 x i32> %res
@@ -102,10 +102,10 @@ entry:
 ; Test original extended type is narrow enough.
 declare <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i16(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   <vscale x 4 x i16>,
   i64);
-define <vscale x 4 x i32> @test_vloxei5(<vscale x 4 x i32>* %ptr, <vscale x 4 x i8> %offset, i64 %vl) {
+define <vscale x 4 x i32> @test_vloxei5(ptr %ptr, <vscale x 4 x i8> %offset, i64 %vl) {
 ; CHECK-LABEL: test_vloxei5:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e16, m1, ta, ma
@@ -121,13 +121,13 @@ entry:
   %shl = shl <vscale x 4 x i16> %offset.ext, %shamt.vec
   %res = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i16(
     <vscale x 4 x i32> undef,
-    <vscale x 4 x i32>* %ptr,
+    ptr %ptr,
     <vscale x 4 x i16> %shl,
     i64 %vl)
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @test_vloxei6(<vscale x 4 x i32>* %ptr, <vscale x 4 x i7> %offset, i64 %vl) {
+define <vscale x 4 x i32> @test_vloxei6(ptr %ptr, <vscale x 4 x i7> %offset, i64 %vl) {
 ; CHECK-LABEL: test_vloxei6:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li a2, 127
@@ -146,13 +146,13 @@ entry:
   %shl = shl <vscale x 4 x i64> %offset.ext, %shamt.vec
   %res = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64(
     <vscale x 4 x i32> undef,
-    <vscale x 4 x i32>* %ptr,
+    ptr %ptr,
     <vscale x 4 x i64> %shl,
     i64 %vl)
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @test_vloxei7(<vscale x 4 x i32>* %ptr, <vscale x 4 x i1> %offset, i64 %vl) {
+define <vscale x 4 x i32> @test_vloxei7(ptr %ptr, <vscale x 4 x i1> %offset, i64 %vl) {
 ; CHECK-LABEL: test_vloxei7:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e64, m4, ta, ma
@@ -169,7 +169,7 @@ entry:
   %shl = shl <vscale x 4 x i64> %offset.ext, %shamt.vec
   %res = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64(
     <vscale x 4 x i32> undef,
-    <vscale x 4 x i32>* %ptr,
+    ptr %ptr,
     <vscale x 4 x i64> %shl,
     i64 %vl)
   ret <vscale x 4 x i32> %res
@@ -177,13 +177,13 @@ entry:
 
 declare <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
   i64,
   i64);
 
-define <vscale x 4 x i32> @test_vloxei_mask(<vscale x 4 x i32>* %ptr, <vscale x 4 x i8> %offset, <vscale x 4 x i1> %m, i64 %vl) {
+define <vscale x 4 x i32> @test_vloxei_mask(ptr %ptr, <vscale x 4 x i8> %offset, <vscale x 4 x i1> %m, i64 %vl) {
 ; CHECK-LABEL: test_vloxei_mask:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e64, m4, ta, ma
@@ -199,7 +199,7 @@ entry:
   %shl = shl <vscale x 4 x i64> %offset.ext, %shamt.vec
   %res = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> undef,
-    <vscale x 4 x i32>* %ptr,
+    ptr %ptr,
     <vscale x 4 x i64> %shl,
     <vscale x 4 x i1> %m,
     i64 %vl, i64 1)
@@ -208,11 +208,11 @@ entry:
 
 declare <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i64(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   <vscale x 4 x i64>,
   i64);
 
-define <vscale x 4 x i32> @test_vluxei(<vscale x 4 x i32>* %ptr, <vscale x 4 x i8> %offset, i64 %vl) {
+define <vscale x 4 x i32> @test_vluxei(ptr %ptr, <vscale x 4 x i8> %offset, i64 %vl) {
 ; CHECK-LABEL: test_vluxei:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e64, m4, ta, ma
@@ -228,7 +228,7 @@ entry:
   %shl = shl <vscale x 4 x i64> %offset.ext, %shamt.vec
   %res = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i64(
     <vscale x 4 x i32> undef,
-    <vscale x 4 x i32>* %ptr,
+    ptr %ptr,
     <vscale x 4 x i64> %shl,
     i64 %vl)
   ret <vscale x 4 x i32> %res
@@ -236,13 +236,13 @@ entry:
 
 declare <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
   i64,
   i64);
 
-define <vscale x 4 x i32> @test_vluxei_mask(<vscale x 4 x i32>* %ptr, <vscale x 4 x i8> %offset, <vscale x 4 x i1> %m, i64 %vl) {
+define <vscale x 4 x i32> @test_vluxei_mask(ptr %ptr, <vscale x 4 x i8> %offset, <vscale x 4 x i1> %m, i64 %vl) {
 ; CHECK-LABEL: test_vluxei_mask:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e64, m4, ta, ma
@@ -258,7 +258,7 @@ entry:
   %shl = shl <vscale x 4 x i64> %offset.ext, %shamt.vec
   %res = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> undef,
-    <vscale x 4 x i32>* %ptr,
+    ptr %ptr,
     <vscale x 4 x i64> %shl,
     <vscale x 4 x i1> %m,
     i64 %vl, i64 1)
@@ -267,11 +267,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i64(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   <vscale x 4 x i64>,
   i64);
 
-define void @test_vsoxei(<vscale x 4 x i32> %val, <vscale x 4 x i32>* %ptr, <vscale x 4 x i8> %offset, i64 %vl) {
+define void @test_vsoxei(<vscale x 4 x i32> %val, ptr %ptr, <vscale x 4 x i8> %offset, i64 %vl) {
 ; CHECK-LABEL: test_vsoxei:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e64, m4, ta, ma
@@ -287,7 +287,7 @@ entry:
   %shl = shl <vscale x 4 x i64> %offset.ext, %shamt.vec
   call void @llvm.riscv.vsoxei.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %val,
-    <vscale x 4 x i32>* %ptr,
+    ptr %ptr,
     <vscale x 4 x i64> %shl,
     i64 %vl)
   ret void
@@ -295,12 +295,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
   i64);
 
-define void @test_vsoxei_mask(<vscale x 4 x i32> %val, <vscale x 4 x i32>* %ptr, <vscale x 4 x i8> %offset, <vscale x 4 x i1> %m, i64 %vl) {
+define void @test_vsoxei_mask(<vscale x 4 x i32> %val, ptr %ptr, <vscale x 4 x i8> %offset, <vscale x 4 x i1> %m, i64 %vl) {
 ; CHECK-LABEL: test_vsoxei_mask:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e64, m4, ta, ma
@@ -316,7 +316,7 @@ entry:
   %shl = shl <vscale x 4 x i64> %offset.ext, %shamt.vec
   call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %val,
-    <vscale x 4 x i32>* %ptr,
+    ptr %ptr,
     <vscale x 4 x i64> %shl,
     <vscale x 4 x i1> %m,
     i64 %vl)
@@ -325,11 +325,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i64(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   <vscale x 4 x i64>,
   i64);
 
-define void @test_vsuxei(<vscale x 4 x i32> %val, <vscale x 4 x i32>* %ptr, <vscale x 4 x i8> %offset, i64 %vl) {
+define void @test_vsuxei(<vscale x 4 x i32> %val, ptr %ptr, <vscale x 4 x i8> %offset, i64 %vl) {
 ; CHECK-LABEL: test_vsuxei:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e64, m4, ta, ma
@@ -345,7 +345,7 @@ entry:
   %shl = shl <vscale x 4 x i64> %offset.ext, %shamt.vec
   call void @llvm.riscv.vsuxei.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %val,
-    <vscale x 4 x i32>* %ptr,
+    ptr %ptr,
     <vscale x 4 x i64> %shl,
     i64 %vl)
   ret void
@@ -353,12 +353,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
   i64);
 
-define void @test_vsuxei_mask(<vscale x 4 x i32> %val, <vscale x 4 x i32>* %ptr, <vscale x 4 x i8> %offset, <vscale x 4 x i1> %m, i64 %vl) {
+define void @test_vsuxei_mask(<vscale x 4 x i32> %val, ptr %ptr, <vscale x 4 x i8> %offset, <vscale x 4 x i1> %m, i64 %vl) {
 ; CHECK-LABEL: test_vsuxei_mask:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e64, m4, ta, ma
@@ -374,7 +374,7 @@ entry:
   %shl = shl <vscale x 4 x i64> %offset.ext, %shamt.vec
   call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %val,
-    <vscale x 4 x i32>* %ptr,
+    ptr %ptr,
     <vscale x 4 x i64> %shl,
     <vscale x 4 x i1> %m,
     i64 %vl)
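
The RVV load/store intrinsics converted in this file carry their element and index types in the overloaded vector types of the intrinsic name and operands, so the pointer parameter can drop to a plain ptr without renaming the intrinsic. A minimal sketch of the converted shape; the wrapper @sketch_vloxei and its argument names are illustrative only, and lowering it still requires a RISC-V target with the V extension, as in these tests:

declare <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64(
  <vscale x 4 x i32>,
  ptr,
  <vscale x 4 x i64>,
  i64)

; Hypothetical wrapper mirroring the calls above.
define <vscale x 4 x i32> @sketch_vloxei(ptr %base, <vscale x 4 x i64> %idx, i64 %vl) {
  %r = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64(
      <vscale x 4 x i32> undef,
      ptr %base,
      <vscale x 4 x i64> %idx,
      i64 %vl)
  ret <vscale x 4 x i32> %r
}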

diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-framelayout.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-framelayout.ll
index 191d932923eb5..ab64459944885 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-framelayout.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-framelayout.ll
@@ -36,13 +36,13 @@ define void @rvv_vla(i64 %n, i64 %i) nounwind {
   %vla.addr = alloca i32, i64 %n
 
   %v1.addr = alloca <vscale x 1 x i64>
-  %v1 = load volatile <vscale x 1 x i64>, <vscale x 1 x i64>* %v1.addr
+  %v1 = load volatile <vscale x 1 x i64>, ptr %v1.addr
 
   %v2.addr = alloca <vscale x 2 x i64>
-  %v2 = load volatile <vscale x 2 x i64>, <vscale x 2 x i64>* %v2.addr
+  %v2 = load volatile <vscale x 2 x i64>, ptr %v2.addr
 
-  %p = getelementptr i32, i32* %vla.addr, i64 %i
-  %s = load volatile i32, i32* %p
+  %p = getelementptr i32, ptr %vla.addr, i64 %i
+  %s = load volatile i32, ptr %p
   ret void
 }
 
@@ -74,12 +74,12 @@ define void @rvv_overaligned() nounwind {
   %overaligned = alloca i32, align 64
 
   %v1.addr = alloca <vscale x 1 x i64>
-  %v1 = load volatile <vscale x 1 x i64>, <vscale x 1 x i64>* %v1.addr
+  %v1 = load volatile <vscale x 1 x i64>, ptr %v1.addr
 
   %v2.addr = alloca <vscale x 2 x i64>
-  %v2 = load volatile <vscale x 2 x i64>, <vscale x 2 x i64>* %v2.addr
+  %v2 = load volatile <vscale x 2 x i64>, ptr %v2.addr
 
-  %s = load volatile i32, i32* %overaligned, align 64
+  %s = load volatile i32, ptr %overaligned, align 64
   ret void
 }
 
@@ -123,14 +123,14 @@ define void @rvv_vla_and_overaligned(i64 %n, i64 %i) nounwind {
   %vla.addr = alloca i32, i64 %n
 
   %v1.addr = alloca <vscale x 1 x i64>
-  %v1 = load volatile <vscale x 1 x i64>, <vscale x 1 x i64>* %v1.addr
+  %v1 = load volatile <vscale x 1 x i64>, ptr %v1.addr
 
   %v2.addr = alloca <vscale x 2 x i64>
-  %v2 = load volatile <vscale x 2 x i64>, <vscale x 2 x i64>* %v2.addr
+  %v2 = load volatile <vscale x 2 x i64>, ptr %v2.addr
 
-  %s1 = load volatile i32, i32* %overaligned, align 64
-  %p = getelementptr i32, i32* %vla.addr, i64 %i
-  %s2 = load volatile i32, i32* %p
+  %s1 = load volatile i32, ptr %overaligned, align 64
+  %p = getelementptr i32, ptr %vla.addr, i64 %i
+  %s2 = load volatile i32, ptr %p
   ret void
 
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll
index 5a74108c90da7..d5ba11c8d19d0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll
@@ -48,7 +48,7 @@ entry:
   %x9.addr = alloca i32, align 4
   store i32 %x0, ptr %x0.addr, align 4
   store i32 %x1, ptr %x1.addr, align 4
-  store <vscale x 16 x i32> %v0, <vscale x 16 x i32>* %v0.addr, align 4
+  store <vscale x 16 x i32> %v0, ptr %v0.addr, align 4
   store i32 %x2, ptr %x2.addr, align 4
   store i32 %x3, ptr %x3.addr, align 4
   store i32 %x4, ptr %x4.addr, align 4
@@ -156,8 +156,8 @@ entry:
   %0 = call i64 @llvm.riscv.vsetvli.i64(i64 4, i64 2, i64 3)
   store i64 %0, ptr %vl, align 8
   %1 = load i64, ptr %vl, align 8
-  %2 = call <vscale x 16 x i32> @llvm.riscv.vle.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32>* %input, i64 %1)
-  store <vscale x 16 x i32> %2, <vscale x 16 x i32>* %v0, align 4
+  %2 = call <vscale x 16 x i32> @llvm.riscv.vle.nxv16i32.i64(<vscale x 16 x i32> undef, ptr %input, i64 %1)
+  store <vscale x 16 x i32> %2, ptr %v0, align 4
   store i32 1, ptr %x0, align 4
   store i32 1, ptr %x1, align 4
   store i32 1, ptr %x2, align 4
@@ -170,7 +170,7 @@ entry:
   store i32 1, ptr %x9, align 4
   %3 = load i32, ptr %x0, align 4
   %4 = load i32, ptr %x1, align 4
-  %5 = load <vscale x 16 x i32>, <vscale x 16 x i32>* %v0, align 4
+  %5 = load <vscale x 16 x i32>, ptr %v0, align 4
   %6 = load i32, ptr %x2, align 4
   %7 = load i32, ptr %x3, align 4
   %8 = load i32, ptr %x4, align 4
@@ -182,7 +182,7 @@ entry:
   call void @lots_args(i32 signext %3, i32 signext %4, <vscale x 16 x i32> %5, i32 signext %6, i32 signext %7, i32 signext %8, i32 signext %9, i32 signext %10, i32 %11, i32 %12, i32 %13)
   %14 = load i32, ptr %x0, align 4
   %15 = load i32, ptr %x1, align 4
-  %16 = load <vscale x 16 x i32>, <vscale x 16 x i32>* %v0, align 4
+  %16 = load <vscale x 16 x i32>, ptr %v0, align 4
   %17 = load i32, ptr %x2, align 4
   %18 = load i32, ptr %x3, align 4
   %19 = load i32, ptr %x4, align 4
@@ -199,6 +199,6 @@ declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg)
 
 declare i64 @llvm.riscv.vsetvli.i64(i64, i64 immarg, i64 immarg)
 
-declare <vscale x 16 x i32> @llvm.riscv.vle.nxv16i32.i64(<vscale x 16 x i32>, <vscale x 16 x i32>* nocapture, i64)
+declare <vscale x 16 x i32> @llvm.riscv.vle.nxv16i32.i64(<vscale x 16 x i32>, ptr nocapture, i64)
 
 attributes #0 = { noinline nounwind optnone "frame-pointer"="all" }

diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-masked-vops.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-masked-vops.ll
index 906eedb7970cd..5f381a307099d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-masked-vops.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-masked-vops.ll
@@ -75,7 +75,7 @@ define <vscale x 2 x i32> @vpmerge_vwadd(<vscale x 2 x i32> %passthru, <vscale x
 }
 declare <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16(<vscale x 2 x i32>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)
 
-define <vscale x 2 x i32> @vpmerge_vle(<vscale x 2 x i32> %passthru, <vscale x 2 x i32>* %p, <vscale x 2 x i1> %m, i64 %vl) {
+define <vscale x 2 x i32> @vpmerge_vle(<vscale x 2 x i32> %passthru, ptr %p, <vscale x 2 x i1> %m, i64 %vl) {
 ; CHECK-LABEL: vpmerge_vle:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
@@ -83,7 +83,7 @@ define <vscale x 2 x i32> @vpmerge_vle(<vscale x 2 x i32> %passthru, <vscale x 2
 ; CHECK-NEXT:    ret
   %a = call <vscale x 2 x i32> @llvm.riscv.vle.mask.nxv2i32(
     <vscale x 2 x i32> %passthru,
-    <vscale x 2 x i32>* %p,
+    ptr %p,
     <vscale x 2 x i1> %m,
     i64 %vl, i64 1)
   %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
@@ -91,7 +91,7 @@ define <vscale x 2 x i32> @vpmerge_vle(<vscale x 2 x i32> %passthru, <vscale x 2
   %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> %mask, i64 %vl)
   ret <vscale x 2 x i32> %b
 }
-declare <vscale x 2 x i32> @llvm.riscv.vle.mask.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>*, <vscale x 2 x i1>, i64, i64)
+declare <vscale x 2 x i32> @llvm.riscv.vle.mask.nxv2i32(<vscale x 2 x i32>, ptr, <vscale x 2 x i1>, i64, i64)
 
 declare <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, i64, <vscale x 2 x i1>, i64, i64)
 define <vscale x 2 x i32> @vpmerge_vslideup(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %v, i64 %x, <vscale x 2 x i1> %m, i64 %vl) {

diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll
index d612298bf50e7..0544204cce792 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll
@@ -3,10 +3,10 @@
 
 declare <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32)
 declare <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32)
-declare <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(<vscale x 2 x i32> *, <vscale x 2 x i1>, i32)
+declare <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr, <vscale x 2 x i1>, i32)
 
 ; Test result has chain output of true operand of merge.vvm.
-define void @vpmerge_vpload_store(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> * %p, <vscale x 2 x i1> %m, i32 zeroext %vl) {
+define void @vpmerge_vpload_store(<vscale x 2 x i32> %passthru, ptr %p, <vscale x 2 x i1> %m, i32 zeroext %vl) {
   ; CHECK-LABEL: name: vpmerge_vpload_store
   ; CHECK: bb.0 (%ir-block.0):
   ; CHECK-NEXT:   liveins: $v8, $x10, $v0, $x11
@@ -21,13 +21,13 @@ define void @vpmerge_vpload_store(<vscale x 2 x i32> %passthru, <vscale x 2 x i3
   ; CHECK-NEXT:   PseudoRET
   %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
   %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(<vscale x 2 x i32> * %p, <vscale x 2 x i1> %mask, i32 %vl)
+  %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr %p, <vscale x 2 x i1> %mask, i32 %vl)
   %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
-  store <vscale x 2 x i32> %b, <vscale x 2 x i32> * %p
+  store <vscale x 2 x i32> %b, ptr %p
   ret void
 }
 
-define void @vpselect_vpload_store(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> * %p, <vscale x 2 x i1> %m, i32 zeroext %vl) {
+define void @vpselect_vpload_store(<vscale x 2 x i32> %passthru, ptr %p, <vscale x 2 x i1> %m, i32 zeroext %vl) {
   ; CHECK-LABEL: name: vpselect_vpload_store
   ; CHECK: bb.0 (%ir-block.0):
   ; CHECK-NEXT:   liveins: $v8, $x10, $v0, $x11
@@ -42,8 +42,8 @@ define void @vpselect_vpload_store(<vscale x 2 x i32> %passthru, <vscale x 2 x i
   ; CHECK-NEXT:   PseudoRET
   %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
   %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(<vscale x 2 x i32> * %p, <vscale x 2 x i1> %mask, i32 %vl)
+  %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr %p, <vscale x 2 x i1> %mask, i32 %vl)
   %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
-  store <vscale x 2 x i32> %b, <vscale x 2 x i32> * %p
+  store <vscale x 2 x i32> %b, ptr %p
   ret void
 }
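
The vp.load declarations in this file keep the .p0 address-space suffix in the mangled name while the parameter itself becomes an untyped ptr; the loaded element type is implied by the result vector type. A minimal sketch of the converted signature in isolation, with the hypothetical function @sketch_vpload and illustrative value names:

declare <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr, <vscale x 2 x i1>, i32)

define <vscale x 2 x i32> @sketch_vpload(ptr %p, <vscale x 2 x i1> %m, i32 zeroext %vl) {
  ; Same operand order as in the tests: pointer, mask, explicit vector length.
  %v = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr %p, <vscale x 2 x i1> %m, i32 %vl)
  ret <vscale x 2 x i32> %v
}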

diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
index c639f092444fc..a4aef577bc9ae 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
@@ -173,8 +173,8 @@ define <vscale x 2 x float> @vpmerge_vpfptrunc(<vscale x 2 x float> %passthru, <
 }
 
 ; Test load operation by vp.load.
-declare <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(<vscale x 2 x i32> *, <vscale x 2 x i1>, i32)
-define <vscale x 2 x i32> @vpmerge_vpload(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> * %p, <vscale x 2 x i1> %m, i32 zeroext %vl) {
+declare <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr, <vscale x 2 x i1>, i32)
+define <vscale x 2 x i32> @vpmerge_vpload(<vscale x 2 x i32> %passthru, ptr %p, <vscale x 2 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vpmerge_vpload:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
@@ -182,13 +182,13 @@ define <vscale x 2 x i32> @vpmerge_vpload(<vscale x 2 x i32> %passthru, <vscale
 ; CHECK-NEXT:    ret
   %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
   %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(<vscale x 2 x i32> * %p, <vscale x 2 x i1> %mask, i32 %vl)
+  %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr %p, <vscale x 2 x i1> %mask, i32 %vl)
   %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
   ret <vscale x 2 x i32> %b
 }
 
 ; Test result has chain and glued node.
-define <vscale x 2 x i32> @vpmerge_vpload2(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> * %p, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, i32 zeroext %vl) {
+define <vscale x 2 x i32> @vpmerge_vpload2(<vscale x 2 x i32> %passthru, ptr %p, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, i32 zeroext %vl) {
 ; CHECK-LABEL: vpmerge_vpload2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -198,14 +198,14 @@ define <vscale x 2 x i32> @vpmerge_vpload2(<vscale x 2 x i32> %passthru, <vscale
 ; CHECK-NEXT:    ret
   %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
   %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(<vscale x 2 x i32> * %p, <vscale x 2 x i1> %mask, i32 %vl)
+  %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr %p, <vscale x 2 x i1> %mask, i32 %vl)
   %m = call <vscale x 2 x i1> @llvm.vp.icmp.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, metadata !"eq", <vscale x 2 x i1> %mask, i32 %vl)
   %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
   ret <vscale x 2 x i32> %b
 }
 
 ; Test result has chain output of true operand of merge.vvm.
-define void @vpmerge_vpload_store(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> * %p, <vscale x 2 x i1> %m, i32 zeroext %vl) {
+define void @vpmerge_vpload_store(<vscale x 2 x i32> %passthru, ptr %p, <vscale x 2 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vpmerge_vpload_store:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
@@ -214,15 +214,15 @@ define void @vpmerge_vpload_store(<vscale x 2 x i32> %passthru, <vscale x 2 x i3
 ; CHECK-NEXT:    ret
   %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
   %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(<vscale x 2 x i32> * %p, <vscale x 2 x i1> %mask, i32 %vl)
+  %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr %p, <vscale x 2 x i1> %mask, i32 %vl)
   %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
-  store <vscale x 2 x i32> %b, <vscale x 2 x i32> * %p
+  store <vscale x 2 x i32> %b, ptr %p
   ret void
 }
 
 ; FIXME: Merge vmerge.vvm and vleffN.v
-declare { <vscale x 2 x i32>, i64 } @llvm.riscv.vleff.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>*, i64)
-define <vscale x 2 x i32> @vpmerge_vleff(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> * %p, <vscale x 2 x i1> %m, i32 zeroext %vl) {
+declare { <vscale x 2 x i32>, i64 } @llvm.riscv.vleff.nxv2i32(<vscale x 2 x i32>, ptr, i64)
+define <vscale x 2 x i32> @vpmerge_vleff(<vscale x 2 x i32> %passthru, ptr %p, <vscale x 2 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vpmerge_vleff:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -231,36 +231,36 @@ define <vscale x 2 x i32> @vpmerge_vleff(<vscale x 2 x i32> %passthru, <vscale x
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
 ; CHECK-NEXT:    ret
   %1 = zext i32 %vl to i64
-  %a = call { <vscale x 2 x i32>, i64 } @llvm.riscv.vleff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32>* %p, i64 %1)
+  %a = call { <vscale x 2 x i32>, i64 } @llvm.riscv.vleff.nxv2i32(<vscale x 2 x i32> undef, ptr %p, i64 %1)
   %b = extractvalue { <vscale x 2 x i32>, i64 } %a, 0
   %c = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %b, <vscale x 2 x i32> %passthru, i32 %vl)
   ret <vscale x 2 x i32> %c
 }
 
 ; Test strided load by riscv.vlse
-declare <vscale x 2 x i32> @llvm.riscv.vlse.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>*, i64, i64)
-define <vscale x 2 x i32> @vpmerge_vlse(<vscale x 2 x i32> %passthru,  <vscale x 2 x i32> * %p, <vscale x 2 x i1> %m, i64 %s, i32 zeroext %vl) {
+declare <vscale x 2 x i32> @llvm.riscv.vlse.nxv2i32(<vscale x 2 x i32>, ptr, i64, i64)
+define <vscale x 2 x i32> @vpmerge_vlse(<vscale x 2 x i32> %passthru,  ptr %p, <vscale x 2 x i1> %m, i64 %s, i32 zeroext %vl) {
 ; CHECK-LABEL: vpmerge_vlse:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, tu, mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
 ; CHECK-NEXT:    ret
   %1 = zext i32 %vl to i64
-  %a = call <vscale x 2 x i32> @llvm.riscv.vlse.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32>* %p, i64 %s, i64 %1)
+  %a = call <vscale x 2 x i32> @llvm.riscv.vlse.nxv2i32(<vscale x 2 x i32> undef, ptr %p, i64 %s, i64 %1)
   %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
   ret <vscale x 2 x i32> %b
 }
 
 ; Test indexed load by riscv.vluxei
-declare <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64(<vscale x 2 x i32>, <vscale x 2 x i32>*, <vscale x 2 x i64>, i64)
-define <vscale x 2 x i32> @vpmerge_vluxei(<vscale x 2 x i32> %passthru,  <vscale x 2 x i32> * %p, <vscale x 2 x i64> %idx, <vscale x 2 x i1> %m, i64 %s, i32 zeroext %vl) {
+declare <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64(<vscale x 2 x i32>, ptr, <vscale x 2 x i64>, i64)
+define <vscale x 2 x i32> @vpmerge_vluxei(<vscale x 2 x i32> %passthru,  ptr %p, <vscale x 2 x i64> %idx, <vscale x 2 x i1> %m, i64 %s, i32 zeroext %vl) {
 ; CHECK-LABEL: vpmerge_vluxei:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, tu, mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
   %1 = zext i32 %vl to i64
-  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64(<vscale x 2 x i32> undef, <vscale x 2 x i32>* %p, <vscale x 2 x i64> %idx, i64 %1)
+  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64(<vscale x 2 x i32> undef, ptr %p, <vscale x 2 x i64> %idx, i64 %1)
   %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
   ret <vscale x 2 x i32> %b
 }
@@ -643,7 +643,7 @@ define <vscale x 2 x float> @vpselect_vpfptrunc(<vscale x 2 x float> %passthru,
 }
 
 ; Test load operation by vp.load.
-define <vscale x 2 x i32> @vpselect_vpload(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> * %p, <vscale x 2 x i1> %m, i32 zeroext %vl) {
+define <vscale x 2 x i32> @vpselect_vpload(<vscale x 2 x i32> %passthru, ptr %p, <vscale x 2 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vpselect_vpload:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -651,13 +651,13 @@ define <vscale x 2 x i32> @vpselect_vpload(<vscale x 2 x i32> %passthru, <vscale
 ; CHECK-NEXT:    ret
   %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
   %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(<vscale x 2 x i32> * %p, <vscale x 2 x i1> %mask, i32 %vl)
+  %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr %p, <vscale x 2 x i1> %mask, i32 %vl)
   %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
   ret <vscale x 2 x i32> %b
 }
 
 ; Test result has chain and glued node.
-define <vscale x 2 x i32> @vpselect_vpload2(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> * %p, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, i32 zeroext %vl) {
+define <vscale x 2 x i32> @vpselect_vpload2(<vscale x 2 x i32> %passthru, ptr %p, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, i32 zeroext %vl) {
 ; CHECK-LABEL: vpselect_vpload2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -666,14 +666,14 @@ define <vscale x 2 x i32> @vpselect_vpload2(<vscale x 2 x i32> %passthru, <vscal
 ; CHECK-NEXT:    ret
   %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
   %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(<vscale x 2 x i32> * %p, <vscale x 2 x i1> %mask, i32 %vl)
+  %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr %p, <vscale x 2 x i1> %mask, i32 %vl)
   %m = call <vscale x 2 x i1> @llvm.vp.icmp.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, metadata !"eq", <vscale x 2 x i1> %mask, i32 %vl)
   %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
   ret <vscale x 2 x i32> %b
 }
 
 ; Test result has chain output of true operand of select.vvm.
-define void @vpselect_vpload_store(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> * %p, <vscale x 2 x i1> %m, i32 zeroext %vl) {
+define void @vpselect_vpload_store(<vscale x 2 x i32> %passthru, ptr %p, <vscale x 2 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vpselect_vpload_store:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -682,14 +682,14 @@ define void @vpselect_vpload_store(<vscale x 2 x i32> %passthru, <vscale x 2 x i
 ; CHECK-NEXT:    ret
   %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
   %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(<vscale x 2 x i32> * %p, <vscale x 2 x i1> %mask, i32 %vl)
+  %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr %p, <vscale x 2 x i1> %mask, i32 %vl)
   %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
-  store <vscale x 2 x i32> %b, <vscale x 2 x i32> * %p
+  store <vscale x 2 x i32> %b, ptr %p
   ret void
 }
 
 ; FIXME: select vselect.vvm and vleffN.v
-define <vscale x 2 x i32> @vpselect_vleff(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> * %p, <vscale x 2 x i1> %m, i32 zeroext %vl) {
+define <vscale x 2 x i32> @vpselect_vleff(<vscale x 2 x i32> %passthru, ptr %p, <vscale x 2 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vpselect_vleff:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -698,34 +698,34 @@ define <vscale x 2 x i32> @vpselect_vleff(<vscale x 2 x i32> %passthru, <vscale
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
 ; CHECK-NEXT:    ret
   %1 = zext i32 %vl to i64
-  %a = call { <vscale x 2 x i32>, i64 } @llvm.riscv.vleff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32>* %p, i64 %1)
+  %a = call { <vscale x 2 x i32>, i64 } @llvm.riscv.vleff.nxv2i32(<vscale x 2 x i32> undef, ptr %p, i64 %1)
   %b = extractvalue { <vscale x 2 x i32>, i64 } %a, 0
   %c = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %b, <vscale x 2 x i32> %passthru, i32 %vl)
   ret <vscale x 2 x i32> %c
 }
 
 ; Test strided load by riscv.vlse
-define <vscale x 2 x i32> @vpselect_vlse(<vscale x 2 x i32> %passthru,  <vscale x 2 x i32> * %p, <vscale x 2 x i1> %m, i64 %s, i32 zeroext %vl) {
+define <vscale x 2 x i32> @vpselect_vlse(<vscale x 2 x i32> %passthru,  ptr %p, <vscale x 2 x i1> %m, i64 %s, i32 zeroext %vl) {
 ; CHECK-LABEL: vpselect_vlse:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
 ; CHECK-NEXT:    ret
   %1 = zext i32 %vl to i64
-  %a = call <vscale x 2 x i32> @llvm.riscv.vlse.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32>* %p, i64 %s, i64 %1)
+  %a = call <vscale x 2 x i32> @llvm.riscv.vlse.nxv2i32(<vscale x 2 x i32> undef, ptr %p, i64 %s, i64 %1)
   %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
   ret <vscale x 2 x i32> %b
 }
 
 ; Test indexed load by riscv.vluxei
-define <vscale x 2 x i32> @vpselect_vluxei(<vscale x 2 x i32> %passthru,  <vscale x 2 x i32> * %p, <vscale x 2 x i64> %idx, <vscale x 2 x i1> %m, i64 %s, i32 zeroext %vl) {
+define <vscale x 2 x i32> @vpselect_vluxei(<vscale x 2 x i32> %passthru,  ptr %p, <vscale x 2 x i64> %idx, <vscale x 2 x i1> %m, i64 %s, i32 zeroext %vl) {
 ; CHECK-LABEL: vpselect_vluxei:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
   %1 = zext i32 %vl to i64
-  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64(<vscale x 2 x i32> undef, <vscale x 2 x i32>* %p, <vscale x 2 x i64> %idx, i64 %1)
+  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64(<vscale x 2 x i32> undef, ptr %p, <vscale x 2 x i64> %idx, i64 %1)
   %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
   ret <vscale x 2 x i32> %b
 }
@@ -970,11 +970,11 @@ define void @test_dag_loop() {
 ; CHECK-NEXT:    vse16.v v16, (zero)
 ; CHECK-NEXT:    ret
 entry:
-  %0 = call <vscale x 32 x i16> @llvm.riscv.vle.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16>* null, i64 1)
+  %0 = call <vscale x 32 x i16> @llvm.riscv.vle.nxv32i16.i64(<vscale x 32 x i16> undef, ptr null, i64 1)
   %1 = tail call <vscale x 32 x i8> @llvm.riscv.vssubu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> zeroinitializer, <vscale x 32 x i8> zeroinitializer, i8 0, <vscale x 32 x i1> zeroinitializer, i64 0, i64 0)
   %2 = tail call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> %1, <vscale x 32 x i8> zeroinitializer, i64 0)
   %3 = tail call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> zeroinitializer, <vscale x 32 x i16> zeroinitializer, <vscale x 32 x i16> %0, <vscale x 32 x i1> %2, i64 1)
-  call void @llvm.riscv.vse.nxv32i16.i64(<vscale x 32 x i16> %3, <vscale x 32 x i16>* null, i64 0)
+  call void @llvm.riscv.vse.nxv32i16.i64(<vscale x 32 x i16> %3, ptr null, i64 0)
   ret void
 }
 
@@ -1084,11 +1084,11 @@ define <vscale x 2 x float> @vfredusum_allones_mask(<vscale x 2 x float> %passth
   ret <vscale x 2 x float> %b
 }
 
-declare <vscale x 32 x i16> @llvm.riscv.vle.nxv32i16.i64(<vscale x 32 x i16>, <vscale x 32 x i16>* nocapture, i64)
+declare <vscale x 32 x i16> @llvm.riscv.vle.nxv32i16.i64(<vscale x 32 x i16>, ptr nocapture, i64)
 declare <vscale x 32 x i8> @llvm.riscv.vssubu.mask.nxv32i8.i8.i64(<vscale x 32 x i8>, <vscale x 32 x i8>, i8, <vscale x 32 x i1>, i64, i64 immarg)
 declare <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.nxv32i8.i64(<vscale x 32 x i8>, <vscale x 32 x i8>, i64)
 declare <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i64)
-declare void @llvm.riscv.vse.nxv32i16.i64(<vscale x 32 x i16>, <vscale x 32 x i16>* nocapture, i64)
+declare void @llvm.riscv.vse.nxv32i16.i64(<vscale x 32 x i16>, ptr nocapture, i64)
 declare <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.i16.i64(<vscale x 1 x i16>, <vscale x 1 x i16>, i16, i64 immarg, i64)
 declare <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 

diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-stack-align.mir b/llvm/test/CodeGen/RISCV/rvv/rvv-stack-align.mir
index d98e18b22f692..6ea6fb183a7fd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-stack-align.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-stack-align.mir
@@ -11,7 +11,7 @@
   target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n64-S128"
   target triple = "riscv64"
 
-  declare void @extern(<vscale x 4 x i32>*)
+  declare void @extern(ptr)
 
   define void @rvv_stack_align8() #0 {
   ; RV32-LABEL: rvv_stack_align8:
@@ -52,7 +52,7 @@
     %a = alloca <vscale x 4 x i32>, align 8
     %b = alloca i64
     %c = alloca i64
-    call void @extern(<vscale x 4 x i32>* %a)
+    call void @extern(ptr %a)
     ret void
   }
 
@@ -95,7 +95,7 @@
     %a = alloca <vscale x 4 x i32>, align 16
     %b = alloca i64
     %c = alloca i64
-    call void @extern(<vscale x 4 x i32>* %a)
+    call void @extern(ptr %a)
     ret void
   }
 
@@ -142,7 +142,7 @@
     %a = alloca <vscale x 4 x i32>, align 32
     %b = alloca i64
     %c = alloca i64
-    call void @extern(<vscale x 4 x i32>* %a)
+    call void @extern(ptr %a)
     ret void
   }
 

diff --git a/llvm/test/CodeGen/RISCV/rvv/scalable-vector-struct.ll b/llvm/test/CodeGen/RISCV/rvv/scalable-vector-struct.ll
index 030555ca76c1f..19d9caadbc203 100644
--- a/llvm/test/CodeGen/RISCV/rvv/scalable-vector-struct.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/scalable-vector-struct.ll
@@ -4,7 +4,7 @@
 ; This demonstrates that we can pass a struct containing scalable vectors across
 ; a basic block.
 
-define i32 @foo({ {<vscale x 2 x i32>, <vscale x 2 x i32>}, i32 } %x, <vscale x 2 x i32>* %y, <vscale x 2 x i32>* %z) {
+define i32 @foo({ {<vscale x 2 x i32>, <vscale x 2 x i32>}, i32 } %x, ptr %y, ptr %z) {
 ; CHECK-LABEL: foo:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vs1r.v v8, (a1)
@@ -17,8 +17,8 @@ return:
   %a = extractvalue { {<vscale x 2 x i32>, <vscale x 2 x i32>}, i32 } %x, 1
   %b = extractvalue { {<vscale x 2 x i32>, <vscale x 2 x i32>}, i32 } %x, 0, 0
   %c = extractvalue { {<vscale x 2 x i32>, <vscale x 2 x i32>}, i32 } %x, 0, 1
-  store <vscale x 2 x i32> %b, <vscale x 2 x i32>* %y
-  store <vscale x 2 x i32> %c, <vscale x 2 x i32>* %z
+  store <vscale x 2 x i32> %b, ptr %y
+  store <vscale x 2 x i32> %c, ptr %z
 
   ret i32 %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/scalar-stack-align.ll b/llvm/test/CodeGen/RISCV/rvv/scalar-stack-align.ll
index 7aaafe9874fb9..409ef50aa53c8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/scalar-stack-align.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/scalar-stack-align.ll
@@ -46,8 +46,8 @@ define ptr @scalar_stack_align16() nounwind {
 ; RV64-NEXT:    ret
   %a = alloca <vscale x 2 x i32>
   %c = alloca i64, align 16
-  call void @extern(<vscale x 2 x i32>* %a)
+  call void @extern(ptr %a)
   ret ptr %c
 }
 
-declare void @extern(<vscale x 2 x i32>*)
+declare void @extern(ptr)

diff --git a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
index ede331cc376f8..191f047131fb1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
@@ -302,9 +302,9 @@ vector.ph:                                        ; preds = %entry
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %6 = getelementptr inbounds i32, ptr %a, i64 %index
-  %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %6, align 4
+  %wide.load = load <vscale x 4 x i32>, ptr %6, align 4
   %7 = mul <vscale x 4 x i32> %wide.load, %broadcast.splat
-  store <vscale x 4 x i32> %7, <vscale x 4 x i32>* %6, align 4
+  store <vscale x 4 x i32> %7, ptr %6, align 4
   %index.next = add nuw i64 %index, %5
   %8 = icmp eq i64 %index.next, %n.vec
   br i1 %8, label %middle.block, label %vector.body
@@ -393,9 +393,9 @@ vector.ph:                                        ; preds = %entry
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %6 = getelementptr inbounds i32, ptr %a, i64 %index
-  %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %6, align 4
+  %wide.load = load <vscale x 4 x i32>, ptr %6, align 4
   %7 = add <vscale x 4 x i32> %wide.load, %broadcast.splat
-  store <vscale x 4 x i32> %7, <vscale x 4 x i32>* %6, align 4
+  store <vscale x 4 x i32> %7, ptr %6, align 4
   %index.next = add nuw i64 %index, %5
   %8 = icmp eq i64 %index.next, %n.vec
   br i1 %8, label %middle.block, label %vector.body
@@ -484,9 +484,9 @@ vector.ph:                                        ; preds = %entry
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %6 = getelementptr inbounds i32, ptr %a, i64 %index
-  %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %6, align 4
+  %wide.load = load <vscale x 4 x i32>, ptr %6, align 4
   %7 = sub <vscale x 4 x i32> %wide.load, %broadcast.splat
-  store <vscale x 4 x i32> %7, <vscale x 4 x i32>* %6, align 4
+  store <vscale x 4 x i32> %7, ptr %6, align 4
   %index.next = add nuw i64 %index, %5
   %8 = icmp eq i64 %index.next, %n.vec
   br i1 %8, label %middle.block, label %vector.body
@@ -575,9 +575,9 @@ vector.ph:                                        ; preds = %entry
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %6 = getelementptr inbounds i32, ptr %a, i64 %index
-  %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %6, align 4
+  %wide.load = load <vscale x 4 x i32>, ptr %6, align 4
   %7 = sub <vscale x 4 x i32> %broadcast.splat, %wide.load
-  store <vscale x 4 x i32> %7, <vscale x 4 x i32>* %6, align 4
+  store <vscale x 4 x i32> %7, ptr %6, align 4
   %index.next = add nuw i64 %index, %5
   %8 = icmp eq i64 %index.next, %n.vec
   br i1 %8, label %middle.block, label %vector.body
@@ -666,9 +666,9 @@ vector.ph:                                        ; preds = %entry
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %6 = getelementptr inbounds i32, ptr %a, i64 %index
-  %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %6, align 4
+  %wide.load = load <vscale x 4 x i32>, ptr %6, align 4
   %7 = and <vscale x 4 x i32> %wide.load, %broadcast.splat
-  store <vscale x 4 x i32> %7, <vscale x 4 x i32>* %6, align 4
+  store <vscale x 4 x i32> %7, ptr %6, align 4
   %index.next = add nuw i64 %index, %5
   %8 = icmp eq i64 %index.next, %n.vec
   br i1 %8, label %middle.block, label %vector.body
@@ -757,9 +757,9 @@ vector.ph:                                        ; preds = %entry
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %6 = getelementptr inbounds i32, ptr %a, i64 %index
-  %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %6, align 4
+  %wide.load = load <vscale x 4 x i32>, ptr %6, align 4
   %7 = or <vscale x 4 x i32> %wide.load, %broadcast.splat
-  store <vscale x 4 x i32> %7, <vscale x 4 x i32>* %6, align 4
+  store <vscale x 4 x i32> %7, ptr %6, align 4
   %index.next = add nuw i64 %index, %5
   %8 = icmp eq i64 %index.next, %n.vec
   br i1 %8, label %middle.block, label %vector.body
@@ -848,9 +848,9 @@ vector.ph:                                        ; preds = %entry
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %6 = getelementptr inbounds i32, ptr %a, i64 %index
-  %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %6, align 4
+  %wide.load = load <vscale x 4 x i32>, ptr %6, align 4
   %7 = xor <vscale x 4 x i32> %wide.load, %broadcast.splat
-  store <vscale x 4 x i32> %7, <vscale x 4 x i32>* %6, align 4
+  store <vscale x 4 x i32> %7, ptr %6, align 4
   %index.next = add nuw i64 %index, %5
   %8 = icmp eq i64 %index.next, %n.vec
   br i1 %8, label %middle.block, label %vector.body
@@ -1041,9 +1041,9 @@ vector.ph:                                        ; preds = %entry
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %6 = getelementptr inbounds i32, ptr %a, i64 %index
-  %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %6, align 4
+  %wide.load = load <vscale x 4 x i32>, ptr %6, align 4
   %7 = shl <vscale x 4 x i32> %wide.load, %broadcast.splat
-  store <vscale x 4 x i32> %7, <vscale x 4 x i32>* %6, align 4
+  store <vscale x 4 x i32> %7, ptr %6, align 4
   %index.next = add nuw i64 %index, %5
   %8 = icmp eq i64 %index.next, %n.vec
   br i1 %8, label %middle.block, label %vector.body
@@ -1132,9 +1132,9 @@ vector.ph:                                        ; preds = %entry
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %6 = getelementptr inbounds i32, ptr %a, i64 %index
-  %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %6, align 4
+  %wide.load = load <vscale x 4 x i32>, ptr %6, align 4
   %7 = lshr <vscale x 4 x i32> %wide.load, %broadcast.splat
-  store <vscale x 4 x i32> %7, <vscale x 4 x i32>* %6, align 4
+  store <vscale x 4 x i32> %7, ptr %6, align 4
   %index.next = add nuw i64 %index, %5
   %8 = icmp eq i64 %index.next, %n.vec
   br i1 %8, label %middle.block, label %vector.body
@@ -1223,9 +1223,9 @@ vector.ph:                                        ; preds = %entry
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %6 = getelementptr inbounds i32, ptr %a, i64 %index
-  %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %6, align 4
+  %wide.load = load <vscale x 4 x i32>, ptr %6, align 4
   %7 = ashr <vscale x 4 x i32> %wide.load, %broadcast.splat
-  store <vscale x 4 x i32> %7, <vscale x 4 x i32>* %6, align 4
+  store <vscale x 4 x i32> %7, ptr %6, align 4
   %index.next = add nuw i64 %index, %5
   %8 = icmp eq i64 %index.next, %n.vec
   br i1 %8, label %middle.block, label %vector.body
@@ -1517,9 +1517,9 @@ vector.ph:                                        ; preds = %entry
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %6 = getelementptr inbounds float, ptr %a, i64 %index
-  %wide.load = load <vscale x 2 x float>, <vscale x 2 x float>* %6, align 4
+  %wide.load = load <vscale x 2 x float>, ptr %6, align 4
   %7 = fmul <vscale x 2 x float> %wide.load, %broadcast.splat
-  store <vscale x 2 x float> %7, <vscale x 2 x float>* %6, align 4
+  store <vscale x 2 x float> %7, ptr %6, align 4
   %index.next = add nuw i64 %index, %5
   %8 = icmp eq i64 %index.next, %n.vec
   br i1 %8, label %middle.block, label %vector.body
@@ -1607,9 +1607,9 @@ vector.ph:                                        ; preds = %entry
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %6 = getelementptr inbounds float, ptr %a, i64 %index
-  %wide.load = load <vscale x 2 x float>, <vscale x 2 x float>* %6, align 4
+  %wide.load = load <vscale x 2 x float>, ptr %6, align 4
   %7 = fdiv <vscale x 2 x float> %wide.load, %broadcast.splat
-  store <vscale x 2 x float> %7, <vscale x 2 x float>* %6, align 4
+  store <vscale x 2 x float> %7, ptr %6, align 4
   %index.next = add nuw i64 %index, %5
   %8 = icmp eq i64 %index.next, %n.vec
   br i1 %8, label %middle.block, label %vector.body
@@ -1697,9 +1697,9 @@ vector.ph:                                        ; preds = %entry
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %6 = getelementptr inbounds float, ptr %a, i64 %index
-  %wide.load = load <vscale x 2 x float>, <vscale x 2 x float>* %6, align 4
+  %wide.load = load <vscale x 2 x float>, ptr %6, align 4
   %7 = fdiv <vscale x 2 x float> %broadcast.splat, %wide.load
-  store <vscale x 2 x float> %7, <vscale x 2 x float>* %6, align 4
+  store <vscale x 2 x float> %7, ptr %6, align 4
   %index.next = add nuw i64 %index, %5
   %8 = icmp eq i64 %index.next, %n.vec
   br i1 %8, label %middle.block, label %vector.body
@@ -1787,9 +1787,9 @@ vector.ph:                                        ; preds = %entry
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %6 = getelementptr inbounds float, ptr %a, i64 %index
-  %wide.load = load <vscale x 2 x float>, <vscale x 2 x float>* %6, align 4
+  %wide.load = load <vscale x 2 x float>, ptr %6, align 4
   %7 = fadd <vscale x 2 x float> %wide.load, %broadcast.splat
-  store <vscale x 2 x float> %7, <vscale x 2 x float>* %6, align 4
+  store <vscale x 2 x float> %7, ptr %6, align 4
   %index.next = add nuw i64 %index, %5
   %8 = icmp eq i64 %index.next, %n.vec
   br i1 %8, label %middle.block, label %vector.body
@@ -1877,9 +1877,9 @@ vector.ph:                                        ; preds = %entry
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %6 = getelementptr inbounds float, ptr %a, i64 %index
-  %wide.load = load <vscale x 2 x float>, <vscale x 2 x float>* %6, align 4
+  %wide.load = load <vscale x 2 x float>, ptr %6, align 4
   %7 = fsub <vscale x 2 x float> %wide.load, %broadcast.splat
-  store <vscale x 2 x float> %7, <vscale x 2 x float>* %6, align 4
+  store <vscale x 2 x float> %7, ptr %6, align 4
   %index.next = add nuw i64 %index, %5
   %8 = icmp eq i64 %index.next, %n.vec
   br i1 %8, label %middle.block, label %vector.body
@@ -1967,9 +1967,9 @@ vector.ph:                                        ; preds = %entry
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %6 = getelementptr inbounds float, ptr %a, i64 %index
-  %wide.load = load <vscale x 2 x float>, <vscale x 2 x float>* %6, align 4
+  %wide.load = load <vscale x 2 x float>, ptr %6, align 4
   %7 = fsub <vscale x 2 x float> %broadcast.splat, %wide.load
-  store <vscale x 2 x float> %7, <vscale x 2 x float>* %6, align 4
+  store <vscale x 2 x float> %7, ptr %6, align 4
   %index.next = add nuw i64 %index, %5
   %8 = icmp eq i64 %index.next, %n.vec
   br i1 %8, label %middle.block, label %vector.body
@@ -2139,11 +2139,11 @@ vector.ph:                                        ; preds = %entry
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %6 = getelementptr inbounds float, ptr %a, i64 %index
-  %wide.load = load <vscale x 2 x float>, <vscale x 2 x float>* %6, align 4
+  %wide.load = load <vscale x 2 x float>, ptr %6, align 4
   %7 = getelementptr inbounds float, ptr %b, i64 %index
-  %wide.load12 = load <vscale x 2 x float>, <vscale x 2 x float>* %7, align 4
+  %wide.load12 = load <vscale x 2 x float>, ptr %7, align 4
   %8 = call <vscale x 2 x float> @llvm.fma.nxv2f32(<vscale x 2 x float> %wide.load, <vscale x 2 x float> %broadcast.splat, <vscale x 2 x float> %wide.load12)
-  store <vscale x 2 x float> %8, <vscale x 2 x float>* %6, align 4
+  store <vscale x 2 x float> %8, ptr %6, align 4
   %index.next = add nuw i64 %index, %5
   %9 = icmp eq i64 %index.next, %n.vec
   br i1 %9, label %middle.block, label %vector.body
@@ -2239,11 +2239,11 @@ vector.ph:                                        ; preds = %entry
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %6 = getelementptr inbounds float, ptr %a, i64 %index
-  %wide.load = load <vscale x 2 x float>, <vscale x 2 x float>* %6, align 4
+  %wide.load = load <vscale x 2 x float>, ptr %6, align 4
   %7 = getelementptr inbounds float, ptr %b, i64 %index
-  %wide.load12 = load <vscale x 2 x float>, <vscale x 2 x float>* %7, align 4
+  %wide.load12 = load <vscale x 2 x float>, ptr %7, align 4
   %8 = call <vscale x 2 x float> @llvm.fma.nxv2f32(<vscale x 2 x float> %broadcast.splat, <vscale x 2 x float> %wide.load, <vscale x 2 x float> %wide.load12)
-  store <vscale x 2 x float> %8, <vscale x 2 x float>* %6, align 4
+  store <vscale x 2 x float> %8, ptr %6, align 4
   %index.next = add nuw i64 %index, %5
   %9 = icmp eq i64 %index.next, %n.vec
   br i1 %9, label %middle.block, label %vector.body
@@ -2547,9 +2547,9 @@ vector.ph:                                        ; preds = %entry
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %6 = getelementptr inbounds i32, ptr %a, i64 %index
-  %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %6, align 4
+  %wide.load = load <vscale x 4 x i32>, ptr %6, align 4
   %7 = udiv <vscale x 4 x i32> %wide.load, %broadcast.splat
-  store <vscale x 4 x i32> %7, <vscale x 4 x i32>* %6, align 4
+  store <vscale x 4 x i32> %7, ptr %6, align 4
   %index.next = add nuw i64 %index, %5
   %8 = icmp eq i64 %index.next, %n.vec
   br i1 %8, label %middle.block, label %vector.body
@@ -2638,9 +2638,9 @@ vector.ph:                                        ; preds = %entry
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %6 = getelementptr inbounds i32, ptr %a, i64 %index
-  %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %6, align 4
+  %wide.load = load <vscale x 4 x i32>, ptr %6, align 4
   %7 = sdiv <vscale x 4 x i32> %wide.load, %broadcast.splat
-  store <vscale x 4 x i32> %7, <vscale x 4 x i32>* %6, align 4
+  store <vscale x 4 x i32> %7, ptr %6, align 4
   %index.next = add nuw i64 %index, %5
   %8 = icmp eq i64 %index.next, %n.vec
   br i1 %8, label %middle.block, label %vector.body
@@ -2729,9 +2729,9 @@ vector.ph:                                        ; preds = %entry
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %6 = getelementptr inbounds i32, ptr %a, i64 %index
-  %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %6, align 4
+  %wide.load = load <vscale x 4 x i32>, ptr %6, align 4
   %7 = urem <vscale x 4 x i32> %wide.load, %broadcast.splat
-  store <vscale x 4 x i32> %7, <vscale x 4 x i32>* %6, align 4
+  store <vscale x 4 x i32> %7, ptr %6, align 4
   %index.next = add nuw i64 %index, %5
   %8 = icmp eq i64 %index.next, %n.vec
   br i1 %8, label %middle.block, label %vector.body
@@ -2820,9 +2820,9 @@ vector.ph:                                        ; preds = %entry
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %6 = getelementptr inbounds i32, ptr %a, i64 %index
-  %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %6, align 4
+  %wide.load = load <vscale x 4 x i32>, ptr %6, align 4
   %7 = srem <vscale x 4 x i32> %wide.load, %broadcast.splat
-  store <vscale x 4 x i32> %7, <vscale x 4 x i32>* %6, align 4
+  store <vscale x 4 x i32> %7, ptr %6, align 4
   %index.next = add nuw i64 %index, %5
   %8 = icmp eq i64 %index.next, %n.vec
   br i1 %8, label %middle.block, label %vector.body

diff  --git a/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll
index 47074d612bb64..a834630e7ebea 100644
--- a/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll
@@ -698,7 +698,7 @@ declare <vscale x 16 x double> @llvm.experimental.vp.strided.load.nxv16f64.p0.i6
 ; NOTE: We can't return <vscale x 17 x double> as that introduces a vector
 ; store that can't yet be legalized through widening. In order to test purely
 ; the vp.strided.load legalization, we manually split it.
-define <vscale x 16 x double> @strided_load_nxv17f64(ptr %ptr, i64 %stride, <vscale x 17 x i1> %mask, i32 zeroext %evl, <vscale x 1 x double>* %hi_ptr) {
+define <vscale x 16 x double> @strided_load_nxv17f64(ptr %ptr, i64 %stride, <vscale x 17 x i1> %mask, i32 zeroext %evl, ptr %hi_ptr) {
 ; CHECK-RV32-LABEL: strided_load_nxv17f64:
 ; CHECK-RV32:       # %bb.0:
 ; CHECK-RV32-NEXT:    csrr a2, vlenb
@@ -795,7 +795,7 @@ define <vscale x 16 x double> @strided_load_nxv17f64(ptr %ptr, i64 %stride, <vsc
   %v = call <vscale x 17 x double> @llvm.experimental.vp.strided.load.nxv17f64.p0.i64(ptr %ptr, i64 %stride, <vscale x 17 x i1> %mask, i32 %evl)
   %lo = call <vscale x 16 x double> @llvm.experimental.vector.extract.nxv16f64(<vscale x 17 x double> %v, i64 0)
   %hi = call <vscale x 1 x double> @llvm.experimental.vector.extract.nxv1f64(<vscale x 17 x double> %v, i64 16)
-  store <vscale x 1 x double> %hi, <vscale x 1 x double>* %hi_ptr
+  store <vscale x 1 x double> %hi, ptr %hi_ptr
   ret <vscale x 16 x double> %lo
 }
 

diff  --git a/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir b/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir
index 86a60e425f819..89b756818e7f5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir
@@ -13,13 +13,13 @@
   target triple = "riscv64"
 
   ; Function Attrs: nounwind
-  define <vscale x 8 x i64> @masked_load_nxv8i64(<vscale x 8 x i64>* %a, <vscale x 8 x i1> %mask) #0 {
-    %load = call <vscale x 8 x i64> @llvm.masked.load.nxv8i64.p0nxv8i64(<vscale x 8 x i64>* %a, i32 8, <vscale x 8 x i1> %mask, <vscale x 8 x i64> undef)
+  define <vscale x 8 x i64> @masked_load_nxv8i64(ptr %a, <vscale x 8 x i1> %mask) #0 {
+    %load = call <vscale x 8 x i64> @llvm.masked.load.nxv8i64.p0(ptr %a, i32 8, <vscale x 8 x i1> %mask, <vscale x 8 x i64> undef)
     ret <vscale x 8 x i64> %load
   }
 
   ; Function Attrs: argmemonly nofree nosync nounwind readonly willreturn
-  declare <vscale x 8 x i64> @llvm.masked.load.nxv8i64.p0nxv8i64(<vscale x 8 x i64>*, i32 immarg, <vscale x 8 x i1>, <vscale x 8 x i64>) #1
+  declare <vscale x 8 x i64> @llvm.masked.load.nxv8i64.p0(ptr, i32 immarg, <vscale x 8 x i1>, <vscale x 8 x i64>) #1
 
   attributes #0 = { nounwind "target-features"="+v" }
   attributes #1 = { argmemonly nofree nosync nounwind readonly willreturn "target-features"="+v" }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/unaligned-loads-stores.ll b/llvm/test/CodeGen/RISCV/rvv/unaligned-loads-stores.ll
index a56ff463154e2..f488baf5a9d9f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/unaligned-loads-stores.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/unaligned-loads-stores.ll
@@ -9,7 +9,7 @@
 ; RUN:    -verify-machineinstrs | FileCheck --check-prefix=FAST %s
 
 
-define <vscale x 1 x i32> @unaligned_load_nxv1i32_a1(<vscale x 1 x i32>* %ptr) {
+define <vscale x 1 x i32> @unaligned_load_nxv1i32_a1(ptr %ptr) {
 ; CHECK-LABEL: unaligned_load_nxv1i32_a1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
@@ -21,11 +21,11 @@ define <vscale x 1 x i32> @unaligned_load_nxv1i32_a1(<vscale x 1 x i32>* %ptr) {
 ; FAST-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
 ; FAST-NEXT:    vle32.v v8, (a0)
 ; FAST-NEXT:    ret
-  %v = load <vscale x 1 x i32>, <vscale x 1 x i32>* %ptr, align 1
+  %v = load <vscale x 1 x i32>, ptr %ptr, align 1
   ret <vscale x 1 x i32> %v
 }
 
-define <vscale x 1 x i32> @unaligned_load_nxv1i32_a2(<vscale x 1 x i32>* %ptr) {
+define <vscale x 1 x i32> @unaligned_load_nxv1i32_a2(ptr %ptr) {
 ; CHECK-LABEL: unaligned_load_nxv1i32_a2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
@@ -37,11 +37,11 @@ define <vscale x 1 x i32> @unaligned_load_nxv1i32_a2(<vscale x 1 x i32>* %ptr) {
 ; FAST-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
 ; FAST-NEXT:    vle32.v v8, (a0)
 ; FAST-NEXT:    ret
-  %v = load <vscale x 1 x i32>, <vscale x 1 x i32>* %ptr, align 2
+  %v = load <vscale x 1 x i32>, ptr %ptr, align 2
   ret <vscale x 1 x i32> %v
 }
 
-define <vscale x 1 x i32> @aligned_load_nxv1i32_a4(<vscale x 1 x i32>* %ptr) {
+define <vscale x 1 x i32> @aligned_load_nxv1i32_a4(ptr %ptr) {
 ; CHECK-LABEL: aligned_load_nxv1i32_a4:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
@@ -53,11 +53,11 @@ define <vscale x 1 x i32> @aligned_load_nxv1i32_a4(<vscale x 1 x i32>* %ptr) {
 ; FAST-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
 ; FAST-NEXT:    vle32.v v8, (a0)
 ; FAST-NEXT:    ret
-  %v = load <vscale x 1 x i32>, <vscale x 1 x i32>* %ptr, align 4
+  %v = load <vscale x 1 x i32>, ptr %ptr, align 4
   ret <vscale x 1 x i32> %v
 }
 
-define <vscale x 1 x i64> @unaligned_load_nxv1i64_a1(<vscale x 1 x i64>* %ptr) {
+define <vscale x 1 x i64> @unaligned_load_nxv1i64_a1(ptr %ptr) {
 ; CHECK-LABEL: unaligned_load_nxv1i64_a1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl1r.v v8, (a0)
@@ -67,11 +67,11 @@ define <vscale x 1 x i64> @unaligned_load_nxv1i64_a1(<vscale x 1 x i64>* %ptr) {
 ; FAST:       # %bb.0:
 ; FAST-NEXT:    vl1re64.v v8, (a0)
 ; FAST-NEXT:    ret
-  %v = load <vscale x 1 x i64>, <vscale x 1 x i64>* %ptr, align 1
+  %v = load <vscale x 1 x i64>, ptr %ptr, align 1
   ret <vscale x 1 x i64> %v
 }
 
-define <vscale x 1 x i64> @unaligned_load_nxv1i64_a4(<vscale x 1 x i64>* %ptr) {
+define <vscale x 1 x i64> @unaligned_load_nxv1i64_a4(ptr %ptr) {
 ; CHECK-LABEL: unaligned_load_nxv1i64_a4:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl1r.v v8, (a0)
@@ -81,11 +81,11 @@ define <vscale x 1 x i64> @unaligned_load_nxv1i64_a4(<vscale x 1 x i64>* %ptr) {
 ; FAST:       # %bb.0:
 ; FAST-NEXT:    vl1re64.v v8, (a0)
 ; FAST-NEXT:    ret
-  %v = load <vscale x 1 x i64>, <vscale x 1 x i64>* %ptr, align 4
+  %v = load <vscale x 1 x i64>, ptr %ptr, align 4
   ret <vscale x 1 x i64> %v
 }
 
-define <vscale x 1 x i64> @aligned_load_nxv1i64_a8(<vscale x 1 x i64>* %ptr) {
+define <vscale x 1 x i64> @aligned_load_nxv1i64_a8(ptr %ptr) {
 ; CHECK-LABEL: aligned_load_nxv1i64_a8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl1re64.v v8, (a0)
@@ -95,11 +95,11 @@ define <vscale x 1 x i64> @aligned_load_nxv1i64_a8(<vscale x 1 x i64>* %ptr) {
 ; FAST:       # %bb.0:
 ; FAST-NEXT:    vl1re64.v v8, (a0)
 ; FAST-NEXT:    ret
-  %v = load <vscale x 1 x i64>, <vscale x 1 x i64>* %ptr, align 8
+  %v = load <vscale x 1 x i64>, ptr %ptr, align 8
   ret <vscale x 1 x i64> %v
 }
 
-define <vscale x 2 x i64> @unaligned_load_nxv2i64_a1(<vscale x 2 x i64>* %ptr) {
+define <vscale x 2 x i64> @unaligned_load_nxv2i64_a1(ptr %ptr) {
 ; CHECK-LABEL: unaligned_load_nxv2i64_a1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl2r.v v8, (a0)
@@ -109,11 +109,11 @@ define <vscale x 2 x i64> @unaligned_load_nxv2i64_a1(<vscale x 2 x i64>* %ptr) {
 ; FAST:       # %bb.0:
 ; FAST-NEXT:    vl2re64.v v8, (a0)
 ; FAST-NEXT:    ret
-  %v = load <vscale x 2 x i64>, <vscale x 2 x i64>* %ptr, align 1
+  %v = load <vscale x 2 x i64>, ptr %ptr, align 1
   ret <vscale x 2 x i64> %v
 }
 
-define <vscale x 2 x i64> @unaligned_load_nxv2i64_a4(<vscale x 2 x i64>* %ptr) {
+define <vscale x 2 x i64> @unaligned_load_nxv2i64_a4(ptr %ptr) {
 ; CHECK-LABEL: unaligned_load_nxv2i64_a4:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl2r.v v8, (a0)
@@ -123,11 +123,11 @@ define <vscale x 2 x i64> @unaligned_load_nxv2i64_a4(<vscale x 2 x i64>* %ptr) {
 ; FAST:       # %bb.0:
 ; FAST-NEXT:    vl2re64.v v8, (a0)
 ; FAST-NEXT:    ret
-  %v = load <vscale x 2 x i64>, <vscale x 2 x i64>* %ptr, align 4
+  %v = load <vscale x 2 x i64>, ptr %ptr, align 4
   ret <vscale x 2 x i64> %v
 }
 
-define <vscale x 2 x i64> @aligned_load_nxv2i64_a8(<vscale x 2 x i64>* %ptr) {
+define <vscale x 2 x i64> @aligned_load_nxv2i64_a8(ptr %ptr) {
 ; CHECK-LABEL: aligned_load_nxv2i64_a8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl2re64.v v8, (a0)
@@ -137,12 +137,12 @@ define <vscale x 2 x i64> @aligned_load_nxv2i64_a8(<vscale x 2 x i64>* %ptr) {
 ; FAST:       # %bb.0:
 ; FAST-NEXT:    vl2re64.v v8, (a0)
 ; FAST-NEXT:    ret
-  %v = load <vscale x 2 x i64>, <vscale x 2 x i64>* %ptr, align 8
+  %v = load <vscale x 2 x i64>, ptr %ptr, align 8
   ret <vscale x 2 x i64> %v
 }
 
 ; Masks should always be aligned
-define <vscale x 1 x i1> @unaligned_load_nxv1i1_a1(<vscale x 1 x i1>* %ptr) {
+define <vscale x 1 x i1> @unaligned_load_nxv1i1_a1(ptr %ptr) {
 ; CHECK-LABEL: unaligned_load_nxv1i1_a1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
@@ -154,11 +154,11 @@ define <vscale x 1 x i1> @unaligned_load_nxv1i1_a1(<vscale x 1 x i1>* %ptr) {
 ; FAST-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
 ; FAST-NEXT:    vlm.v v0, (a0)
 ; FAST-NEXT:    ret
-  %v = load <vscale x 1 x i1>, <vscale x 1 x i1>* %ptr, align 1
+  %v = load <vscale x 1 x i1>, ptr %ptr, align 1
   ret <vscale x 1 x i1> %v
 }
 
-define <vscale x 4 x float> @unaligned_load_nxv4f32_a1(<vscale x 4 x float>* %ptr) {
+define <vscale x 4 x float> @unaligned_load_nxv4f32_a1(ptr %ptr) {
 ; CHECK-LABEL: unaligned_load_nxv4f32_a1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl2r.v v8, (a0)
@@ -168,11 +168,11 @@ define <vscale x 4 x float> @unaligned_load_nxv4f32_a1(<vscale x 4 x float>* %pt
 ; FAST:       # %bb.0:
 ; FAST-NEXT:    vl2re32.v v8, (a0)
 ; FAST-NEXT:    ret
-  %v = load <vscale x 4 x float>, <vscale x 4 x float>* %ptr, align 1
+  %v = load <vscale x 4 x float>, ptr %ptr, align 1
   ret <vscale x 4 x float> %v
 }
 
-define <vscale x 4 x float> @unaligned_load_nxv4f32_a2(<vscale x 4 x float>* %ptr) {
+define <vscale x 4 x float> @unaligned_load_nxv4f32_a2(ptr %ptr) {
 ; CHECK-LABEL: unaligned_load_nxv4f32_a2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl2r.v v8, (a0)
@@ -182,11 +182,11 @@ define <vscale x 4 x float> @unaligned_load_nxv4f32_a2(<vscale x 4 x float>* %pt
 ; FAST:       # %bb.0:
 ; FAST-NEXT:    vl2re32.v v8, (a0)
 ; FAST-NEXT:    ret
-  %v = load <vscale x 4 x float>, <vscale x 4 x float>* %ptr, align 2
+  %v = load <vscale x 4 x float>, ptr %ptr, align 2
   ret <vscale x 4 x float> %v
 }
 
-define <vscale x 4 x float> @aligned_load_nxv4f32_a4(<vscale x 4 x float>* %ptr) {
+define <vscale x 4 x float> @aligned_load_nxv4f32_a4(ptr %ptr) {
 ; CHECK-LABEL: aligned_load_nxv4f32_a4:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl2re32.v v8, (a0)
@@ -196,11 +196,11 @@ define <vscale x 4 x float> @aligned_load_nxv4f32_a4(<vscale x 4 x float>* %ptr)
 ; FAST:       # %bb.0:
 ; FAST-NEXT:    vl2re32.v v8, (a0)
 ; FAST-NEXT:    ret
-  %v = load <vscale x 4 x float>, <vscale x 4 x float>* %ptr, align 4
+  %v = load <vscale x 4 x float>, ptr %ptr, align 4
   ret <vscale x 4 x float> %v
 }
 
-define <vscale x 8 x half> @unaligned_load_nxv8f16_a1(<vscale x 8 x half>* %ptr) {
+define <vscale x 8 x half> @unaligned_load_nxv8f16_a1(ptr %ptr) {
 ; CHECK-LABEL: unaligned_load_nxv8f16_a1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl2r.v v8, (a0)
@@ -210,11 +210,11 @@ define <vscale x 8 x half> @unaligned_load_nxv8f16_a1(<vscale x 8 x half>* %ptr)
 ; FAST:       # %bb.0:
 ; FAST-NEXT:    vl2re16.v v8, (a0)
 ; FAST-NEXT:    ret
-  %v = load <vscale x 8 x half>, <vscale x 8 x half>* %ptr, align 1
+  %v = load <vscale x 8 x half>, ptr %ptr, align 1
   ret <vscale x 8 x half> %v
 }
 
-define <vscale x 8 x half> @aligned_load_nxv8f16_a2(<vscale x 8 x half>* %ptr) {
+define <vscale x 8 x half> @aligned_load_nxv8f16_a2(ptr %ptr) {
 ; CHECK-LABEL: aligned_load_nxv8f16_a2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl2re16.v v8, (a0)
@@ -224,11 +224,11 @@ define <vscale x 8 x half> @aligned_load_nxv8f16_a2(<vscale x 8 x half>* %ptr) {
 ; FAST:       # %bb.0:
 ; FAST-NEXT:    vl2re16.v v8, (a0)
 ; FAST-NEXT:    ret
-  %v = load <vscale x 8 x half>, <vscale x 8 x half>* %ptr, align 2
+  %v = load <vscale x 8 x half>, ptr %ptr, align 2
   ret <vscale x 8 x half> %v
 }
 
-define void @unaligned_store_nxv4i32_a1(<vscale x 4 x i32> %x, <vscale x 4 x i32>* %ptr) {
+define void @unaligned_store_nxv4i32_a1(<vscale x 4 x i32> %x, ptr %ptr) {
 ; CHECK-LABEL: unaligned_store_nxv4i32_a1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vs2r.v v8, (a0)
@@ -238,11 +238,11 @@ define void @unaligned_store_nxv4i32_a1(<vscale x 4 x i32> %x, <vscale x 4 x i32
 ; FAST:       # %bb.0:
 ; FAST-NEXT:    vs2r.v v8, (a0)
 ; FAST-NEXT:    ret
-  store <vscale x 4 x i32> %x, <vscale x 4 x i32>* %ptr, align 1
+  store <vscale x 4 x i32> %x, ptr %ptr, align 1
   ret void
 }
 
-define void @unaligned_store_nxv4i32_a2(<vscale x 4 x i32> %x, <vscale x 4 x i32>* %ptr) {
+define void @unaligned_store_nxv4i32_a2(<vscale x 4 x i32> %x, ptr %ptr) {
 ; CHECK-LABEL: unaligned_store_nxv4i32_a2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vs2r.v v8, (a0)
@@ -252,11 +252,11 @@ define void @unaligned_store_nxv4i32_a2(<vscale x 4 x i32> %x, <vscale x 4 x i32
 ; FAST:       # %bb.0:
 ; FAST-NEXT:    vs2r.v v8, (a0)
 ; FAST-NEXT:    ret
-  store <vscale x 4 x i32> %x, <vscale x 4 x i32>* %ptr, align 2
+  store <vscale x 4 x i32> %x, ptr %ptr, align 2
   ret void
 }
 
-define void @aligned_store_nxv4i32_a4(<vscale x 4 x i32> %x, <vscale x 4 x i32>* %ptr) {
+define void @aligned_store_nxv4i32_a4(<vscale x 4 x i32> %x, ptr %ptr) {
 ; CHECK-LABEL: aligned_store_nxv4i32_a4:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vs2r.v v8, (a0)
@@ -266,11 +266,11 @@ define void @aligned_store_nxv4i32_a4(<vscale x 4 x i32> %x, <vscale x 4 x i32>*
 ; FAST:       # %bb.0:
 ; FAST-NEXT:    vs2r.v v8, (a0)
 ; FAST-NEXT:    ret
-  store <vscale x 4 x i32> %x, <vscale x 4 x i32>* %ptr, align 4
+  store <vscale x 4 x i32> %x, ptr %ptr, align 4
   ret void
 }
 
-define void @unaligned_store_nxv1i16_a1(<vscale x 1 x i16> %x, <vscale x 1 x i16>* %ptr) {
+define void @unaligned_store_nxv1i16_a1(<vscale x 1 x i16> %x, ptr %ptr) {
 ; CHECK-LABEL: unaligned_store_nxv1i16_a1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
@@ -282,11 +282,11 @@ define void @unaligned_store_nxv1i16_a1(<vscale x 1 x i16> %x, <vscale x 1 x i16
 ; FAST-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
 ; FAST-NEXT:    vse16.v v8, (a0)
 ; FAST-NEXT:    ret
-  store <vscale x 1 x i16> %x, <vscale x 1 x i16>* %ptr, align 1
+  store <vscale x 1 x i16> %x, ptr %ptr, align 1
   ret void
 }
 
-define void @aligned_store_nxv1i16_a2(<vscale x 1 x i16> %x, <vscale x 1 x i16>* %ptr) {
+define void @aligned_store_nxv1i16_a2(<vscale x 1 x i16> %x, ptr %ptr) {
 ; CHECK-LABEL: aligned_store_nxv1i16_a2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
@@ -298,6 +298,6 @@ define void @aligned_store_nxv1i16_a2(<vscale x 1 x i16> %x, <vscale x 1 x i16>*
 ; FAST-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
 ; FAST-NEXT:    vse16.v v8, (a0)
 ; FAST-NEXT:    ret
-  store <vscale x 1 x i16> %x, <vscale x 1 x i16>* %ptr, align 2
+  store <vscale x 1 x i16> %x, ptr %ptr, align 2
   ret void
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll
index eccd72a14f270..2926a23c8b274 100644
--- a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll
@@ -6,10 +6,10 @@
 
 declare <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vle_v_tu_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, iXLen %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vle_v_tu_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_tu_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
@@ -18,7 +18,7 @@ define <vscale x 1 x i8> @intrinsic_vle_v_tu_nxv1i8_nxv1i8(<vscale x 1 x i8> %0,
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8(
     <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
+    ptr %1,
     iXLen %2)
 
   ret <vscale x 1 x i8> %a
@@ -26,12 +26,12 @@ entry:
 
 declare <vscale x 1 x i8> @llvm.riscv.vlse(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   iXLen,
   iXLen);
 
 
-define <vscale x 1 x i8> @intrinsic_vlse_v_tu(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, iXLen %2, iXLen %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vlse_v_tu(<vscale x 1 x i8> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_tu:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, tu, ma
@@ -40,7 +40,7 @@ define <vscale x 1 x i8> @intrinsic_vlse_v_tu(<vscale x 1 x i8> %0, <vscale x 1
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vlse(
     <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
+    ptr %1,
     iXLen %2,
     iXLen %3)
 
@@ -49,10 +49,10 @@ entry:
 
 declare { <vscale x 1 x i8>, iXLen } @llvm.riscv.vleff(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vleff_v_tu(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, iXLen %2, iXLen* %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vleff_v_tu(<vscale x 1 x i8> %0, ptr %1, iXLen %2, iXLen* %3) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_tu:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
@@ -71,7 +71,7 @@ define <vscale x 1 x i8> @intrinsic_vleff_v_tu(<vscale x 1 x i8> %0, <vscale x 1
 entry:
   %a = call { <vscale x 1 x i8>, iXLen } @llvm.riscv.vleff(
     <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
+    ptr %1,
     iXLen %2)
   %b = extractvalue { <vscale x 1 x i8>, iXLen } %a, 0
   %c = extractvalue { <vscale x 1 x i8>, iXLen } %a, 1
@@ -81,11 +81,11 @@ entry:
 
 declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   <vscale x 1 x i8>,
   iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vloxei_v_tu_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vloxei_v_tu_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_tu_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
@@ -94,7 +94,7 @@ define <vscale x 1 x i8> @intrinsic_vloxei_v_tu_nxv1i8_nxv1i8(<vscale x 1 x i8>
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8(
     <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
+    ptr %1,
     <vscale x 1 x i8> %2,
     iXLen %3)
 

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vle.ll b/llvm/test/CodeGen/RISCV/rvv/vle.ll
index 40d51b72ec738..a68a47f11e51d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vle.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vle.ll
@@ -6,10 +6,10 @@
 
 declare <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vle_v_nxv1i64_nxv1i64(<vscale x 1 x i64>* %0, iXLen %1) nounwind {
+define <vscale x 1 x i64> @intrinsic_vle_v_nxv1i64_nxv1i64(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -18,7 +18,7 @@ define <vscale x 1 x i64> @intrinsic_vle_v_nxv1i64_nxv1i64(<vscale x 1 x i64>* %
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64(
     <vscale x 1 x i64> undef,
-    <vscale x 1 x i64>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 1 x i64> %a
@@ -26,12 +26,12 @@ entry:
 
 declare <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vle_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vle_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -40,7 +40,7 @@ define <vscale x 1 x i64> @intrinsic_vle_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i6
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
     <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
+    ptr %1,
     <vscale x 1 x i1> %2,
     iXLen %3, iXLen 1)
 
@@ -49,10 +49,10 @@ entry:
 
 declare <vscale x 2 x i64> @llvm.riscv.vle.nxv2i64(
   <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
+  ptr,
   iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vle_v_nxv2i64_nxv2i64(<vscale x 2 x i64>* %0, iXLen %1) nounwind {
+define <vscale x 2 x i64> @intrinsic_vle_v_nxv2i64_nxv2i64(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -61,7 +61,7 @@ define <vscale x 2 x i64> @intrinsic_vle_v_nxv2i64_nxv2i64(<vscale x 2 x i64>* %
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vle.nxv2i64(
     <vscale x 2 x i64> undef,
-    <vscale x 2 x i64>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 2 x i64> %a
@@ -69,12 +69,12 @@ entry:
 
 declare <vscale x 2 x i64> @llvm.riscv.vle.mask.nxv2i64(
   <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
+  ptr,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vle_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vle_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -83,7 +83,7 @@ define <vscale x 2 x i64> @intrinsic_vle_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i6
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vle.mask.nxv2i64(
     <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
+    ptr %1,
     <vscale x 2 x i1> %2,
     iXLen %3, iXLen 1)
 
@@ -92,10 +92,10 @@ entry:
 
 declare <vscale x 4 x i64> @llvm.riscv.vle.nxv4i64(
   <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
+  ptr,
   iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vle_v_nxv4i64_nxv4i64(<vscale x 4 x i64>* %0, iXLen %1) nounwind {
+define <vscale x 4 x i64> @intrinsic_vle_v_nxv4i64_nxv4i64(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -104,7 +104,7 @@ define <vscale x 4 x i64> @intrinsic_vle_v_nxv4i64_nxv4i64(<vscale x 4 x i64>* %
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vle.nxv4i64(
     <vscale x 4 x i64> undef,
-    <vscale x 4 x i64>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 4 x i64> %a
@@ -112,12 +112,12 @@ entry:
 
 declare <vscale x 4 x i64> @llvm.riscv.vle.mask.nxv4i64(
   <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
+  ptr,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vle_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vle_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -126,7 +126,7 @@ define <vscale x 4 x i64> @intrinsic_vle_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i6
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vle.mask.nxv4i64(
     <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
+    ptr %1,
     <vscale x 4 x i1> %2,
     iXLen %3, iXLen 1)
 
@@ -135,10 +135,10 @@ entry:
 
 declare <vscale x 8 x i64> @llvm.riscv.vle.nxv8i64(
   <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
+  ptr,
   iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vle_v_nxv8i64_nxv8i64(<vscale x 8 x i64>* %0, iXLen %1) nounwind {
+define <vscale x 8 x i64> @intrinsic_vle_v_nxv8i64_nxv8i64(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -147,7 +147,7 @@ define <vscale x 8 x i64> @intrinsic_vle_v_nxv8i64_nxv8i64(<vscale x 8 x i64>* %
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vle.nxv8i64(
     <vscale x 8 x i64> undef,
-    <vscale x 8 x i64>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 8 x i64> %a
@@ -155,12 +155,12 @@ entry:
 
 declare <vscale x 8 x i64> @llvm.riscv.vle.mask.nxv8i64(
   <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
+  ptr,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vle_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+define <vscale x 8 x i64> @intrinsic_vle_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -169,7 +169,7 @@ define <vscale x 8 x i64> @intrinsic_vle_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i6
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vle.mask.nxv8i64(
     <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
+    ptr %1,
     <vscale x 8 x i1> %2,
     iXLen %3, iXLen 1)
 
@@ -178,10 +178,10 @@ entry:
 
 declare <vscale x 1 x double> @llvm.riscv.vle.nxv1f64(
   <vscale x 1 x double>,
-  <vscale x 1 x double>*,
+  ptr,
   iXLen);
 
-define <vscale x 1 x double> @intrinsic_vle_v_nxv1f64_nxv1f64(<vscale x 1 x double>* %0, iXLen %1) nounwind {
+define <vscale x 1 x double> @intrinsic_vle_v_nxv1f64_nxv1f64(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -190,7 +190,7 @@ define <vscale x 1 x double> @intrinsic_vle_v_nxv1f64_nxv1f64(<vscale x 1 x doub
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vle.nxv1f64(
     <vscale x 1 x double> undef,
-    <vscale x 1 x double>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 1 x double> %a
@@ -198,12 +198,12 @@ entry:
 
 declare <vscale x 1 x double> @llvm.riscv.vle.mask.nxv1f64(
   <vscale x 1 x double>,
-  <vscale x 1 x double>*,
+  ptr,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x double> @intrinsic_vle_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vle_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -212,7 +212,7 @@ define <vscale x 1 x double> @intrinsic_vle_mask_v_nxv1f64_nxv1f64(<vscale x 1 x
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vle.mask.nxv1f64(
     <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
+    ptr %1,
     <vscale x 1 x i1> %2,
     iXLen %3, iXLen 1)
 
@@ -221,10 +221,10 @@ entry:
 
 declare <vscale x 2 x double> @llvm.riscv.vle.nxv2f64(
   <vscale x 2 x double>,
-  <vscale x 2 x double>*,
+  ptr,
   iXLen);
 
-define <vscale x 2 x double> @intrinsic_vle_v_nxv2f64_nxv2f64(<vscale x 2 x double>* %0, iXLen %1) nounwind {
+define <vscale x 2 x double> @intrinsic_vle_v_nxv2f64_nxv2f64(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -233,7 +233,7 @@ define <vscale x 2 x double> @intrinsic_vle_v_nxv2f64_nxv2f64(<vscale x 2 x doub
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vle.nxv2f64(
     <vscale x 2 x double> undef,
-    <vscale x 2 x double>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 2 x double> %a
@@ -241,12 +241,12 @@ entry:
 
 declare <vscale x 2 x double> @llvm.riscv.vle.mask.nxv2f64(
   <vscale x 2 x double>,
-  <vscale x 2 x double>*,
+  ptr,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x double> @intrinsic_vle_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+define <vscale x 2 x double> @intrinsic_vle_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -255,7 +255,7 @@ define <vscale x 2 x double> @intrinsic_vle_mask_v_nxv2f64_nxv2f64(<vscale x 2 x
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vle.mask.nxv2f64(
     <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
+    ptr %1,
     <vscale x 2 x i1> %2,
     iXLen %3, iXLen 1)
 
@@ -264,10 +264,10 @@ entry:
 
 declare <vscale x 4 x double> @llvm.riscv.vle.nxv4f64(
   <vscale x 4 x double>,
-  <vscale x 4 x double>*,
+  ptr,
   iXLen);
 
-define <vscale x 4 x double> @intrinsic_vle_v_nxv4f64_nxv4f64(<vscale x 4 x double>* %0, iXLen %1) nounwind {
+define <vscale x 4 x double> @intrinsic_vle_v_nxv4f64_nxv4f64(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -276,7 +276,7 @@ define <vscale x 4 x double> @intrinsic_vle_v_nxv4f64_nxv4f64(<vscale x 4 x doub
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vle.nxv4f64(
     <vscale x 4 x double> undef,
-    <vscale x 4 x double>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 4 x double> %a
@@ -284,12 +284,12 @@ entry:
 
 declare <vscale x 4 x double> @llvm.riscv.vle.mask.nxv4f64(
   <vscale x 4 x double>,
-  <vscale x 4 x double>*,
+  ptr,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x double> @intrinsic_vle_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+define <vscale x 4 x double> @intrinsic_vle_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -298,7 +298,7 @@ define <vscale x 4 x double> @intrinsic_vle_mask_v_nxv4f64_nxv4f64(<vscale x 4 x
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vle.mask.nxv4f64(
     <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
+    ptr %1,
     <vscale x 4 x i1> %2,
     iXLen %3, iXLen 1)
 
@@ -307,10 +307,10 @@ entry:
 
 declare <vscale x 8 x double> @llvm.riscv.vle.nxv8f64(
   <vscale x 8 x double>,
-  <vscale x 8 x double>*,
+  ptr,
   iXLen);
 
-define <vscale x 8 x double> @intrinsic_vle_v_nxv8f64_nxv8f64(<vscale x 8 x double>* %0, iXLen %1) nounwind {
+define <vscale x 8 x double> @intrinsic_vle_v_nxv8f64_nxv8f64(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -319,7 +319,7 @@ define <vscale x 8 x double> @intrinsic_vle_v_nxv8f64_nxv8f64(<vscale x 8 x doub
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vle.nxv8f64(
     <vscale x 8 x double> undef,
-    <vscale x 8 x double>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 8 x double> %a
@@ -327,12 +327,12 @@ entry:
 
 declare <vscale x 8 x double> @llvm.riscv.vle.mask.nxv8f64(
   <vscale x 8 x double>,
-  <vscale x 8 x double>*,
+  ptr,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x double> @intrinsic_vle_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+define <vscale x 8 x double> @intrinsic_vle_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -341,7 +341,7 @@ define <vscale x 8 x double> @intrinsic_vle_mask_v_nxv8f64_nxv8f64(<vscale x 8 x
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vle.mask.nxv8f64(
     <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
+    ptr %1,
     <vscale x 8 x i1> %2,
     iXLen %3, iXLen 1)
 
@@ -350,10 +350,10 @@ entry:
 
 declare <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32(
   <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
+  ptr,
   iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vle_v_nxv1i32_nxv1i32(<vscale x 1 x i32>* %0, iXLen %1) nounwind {
+define <vscale x 1 x i32> @intrinsic_vle_v_nxv1i32_nxv1i32(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -362,7 +362,7 @@ define <vscale x 1 x i32> @intrinsic_vle_v_nxv1i32_nxv1i32(<vscale x 1 x i32>* %
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32(
     <vscale x 1 x i32> undef,
-    <vscale x 1 x i32>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 1 x i32> %a
@@ -370,12 +370,12 @@ entry:
 
 declare <vscale x 1 x i32> @llvm.riscv.vle.mask.nxv1i32(
   <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
+  ptr,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vle_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vle_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -384,7 +384,7 @@ define <vscale x 1 x i32> @intrinsic_vle_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i3
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vle.mask.nxv1i32(
     <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
+    ptr %1,
     <vscale x 1 x i1> %2,
     iXLen %3, iXLen 1)
 
@@ -393,10 +393,10 @@ entry:
 
 declare <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32(
   <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
+  ptr,
   iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vle_v_nxv2i32_nxv2i32(<vscale x 2 x i32>* %0, iXLen %1) nounwind {
+define <vscale x 2 x i32> @intrinsic_vle_v_nxv2i32_nxv2i32(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -405,7 +405,7 @@ define <vscale x 2 x i32> @intrinsic_vle_v_nxv2i32_nxv2i32(<vscale x 2 x i32>* %
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32(
     <vscale x 2 x i32> undef,
-    <vscale x 2 x i32>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 2 x i32> %a
@@ -413,12 +413,12 @@ entry:
 
 declare <vscale x 2 x i32> @llvm.riscv.vle.mask.nxv2i32(
   <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
+  ptr,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vle_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vle_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -427,7 +427,7 @@ define <vscale x 2 x i32> @intrinsic_vle_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i3
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vle.mask.nxv2i32(
     <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
+    ptr %1,
     <vscale x 2 x i1> %2,
     iXLen %3, iXLen 1)
 
@@ -436,10 +436,10 @@ entry:
 
 declare <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vle_v_nxv4i32_nxv4i32(<vscale x 4 x i32>* %0, iXLen %1) nounwind {
+define <vscale x 4 x i32> @intrinsic_vle_v_nxv4i32_nxv4i32(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -448,7 +448,7 @@ define <vscale x 4 x i32> @intrinsic_vle_v_nxv4i32_nxv4i32(<vscale x 4 x i32>* %
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32(
     <vscale x 4 x i32> undef,
-    <vscale x 4 x i32>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 4 x i32> %a
@@ -456,12 +456,12 @@ entry:
 
 declare <vscale x 4 x i32> @llvm.riscv.vle.mask.nxv4i32(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vle_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vle_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -470,7 +470,7 @@ define <vscale x 4 x i32> @intrinsic_vle_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i3
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vle.mask.nxv4i32(
     <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
+    ptr %1,
     <vscale x 4 x i1> %2,
     iXLen %3, iXLen 1)
 
@@ -479,10 +479,10 @@ entry:
 
 declare <vscale x 8 x i32> @llvm.riscv.vle.nxv8i32(
   <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
+  ptr,
   iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vle_v_nxv8i32_nxv8i32(<vscale x 8 x i32>* %0, iXLen %1) nounwind {
+define <vscale x 8 x i32> @intrinsic_vle_v_nxv8i32_nxv8i32(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -491,7 +491,7 @@ define <vscale x 8 x i32> @intrinsic_vle_v_nxv8i32_nxv8i32(<vscale x 8 x i32>* %
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vle.nxv8i32(
     <vscale x 8 x i32> undef,
-    <vscale x 8 x i32>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 8 x i32> %a
@@ -499,12 +499,12 @@ entry:
 
 declare <vscale x 8 x i32> @llvm.riscv.vle.mask.nxv8i32(
   <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
+  ptr,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vle_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vle_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -513,7 +513,7 @@ define <vscale x 8 x i32> @intrinsic_vle_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i3
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vle.mask.nxv8i32(
     <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
+    ptr %1,
     <vscale x 8 x i1> %2,
     iXLen %3, iXLen 1)
 
@@ -522,10 +522,10 @@ entry:
 
 declare <vscale x 16 x i32> @llvm.riscv.vle.nxv16i32(
   <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
+  ptr,
   iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vle_v_nxv16i32_nxv16i32(<vscale x 16 x i32>* %0, iXLen %1) nounwind {
+define <vscale x 16 x i32> @intrinsic_vle_v_nxv16i32_nxv16i32(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -534,7 +534,7 @@ define <vscale x 16 x i32> @intrinsic_vle_v_nxv16i32_nxv16i32(<vscale x 16 x i32
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vle.nxv16i32(
     <vscale x 16 x i32> undef,
-    <vscale x 16 x i32>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 16 x i32> %a
@@ -542,12 +542,12 @@ entry:
 
 declare <vscale x 16 x i32> @llvm.riscv.vle.mask.nxv16i32(
   <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
+  ptr,
   <vscale x 16 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vle_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+define <vscale x 16 x i32> @intrinsic_vle_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -556,7 +556,7 @@ define <vscale x 16 x i32> @intrinsic_vle_mask_v_nxv16i32_nxv16i32(<vscale x 16
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vle.mask.nxv16i32(
     <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
+    ptr %1,
     <vscale x 16 x i1> %2,
     iXLen %3, iXLen 1)
 
@@ -565,10 +565,10 @@ entry:
 
 declare <vscale x 1 x float> @llvm.riscv.vle.nxv1f32(
   <vscale x 1 x float>,
-  <vscale x 1 x float>*,
+  ptr,
   iXLen);
 
-define <vscale x 1 x float> @intrinsic_vle_v_nxv1f32_nxv1f32(<vscale x 1 x float>* %0, iXLen %1) nounwind {
+define <vscale x 1 x float> @intrinsic_vle_v_nxv1f32_nxv1f32(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -577,7 +577,7 @@ define <vscale x 1 x float> @intrinsic_vle_v_nxv1f32_nxv1f32(<vscale x 1 x float
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vle.nxv1f32(
     <vscale x 1 x float> undef,
-    <vscale x 1 x float>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 1 x float> %a
@@ -585,12 +585,12 @@ entry:
 
 declare <vscale x 1 x float> @llvm.riscv.vle.mask.nxv1f32(
   <vscale x 1 x float>,
-  <vscale x 1 x float>*,
+  ptr,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x float> @intrinsic_vle_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+define <vscale x 1 x float> @intrinsic_vle_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -599,7 +599,7 @@ define <vscale x 1 x float> @intrinsic_vle_mask_v_nxv1f32_nxv1f32(<vscale x 1 x
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vle.mask.nxv1f32(
     <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
+    ptr %1,
     <vscale x 1 x i1> %2,
     iXLen %3, iXLen 1)
 
@@ -608,10 +608,10 @@ entry:
 
 declare <vscale x 2 x float> @llvm.riscv.vle.nxv2f32(
   <vscale x 2 x float>,
-  <vscale x 2 x float>*,
+  ptr,
   iXLen);
 
-define <vscale x 2 x float> @intrinsic_vle_v_nxv2f32_nxv2f32(<vscale x 2 x float>* %0, iXLen %1) nounwind {
+define <vscale x 2 x float> @intrinsic_vle_v_nxv2f32_nxv2f32(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -620,7 +620,7 @@ define <vscale x 2 x float> @intrinsic_vle_v_nxv2f32_nxv2f32(<vscale x 2 x float
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vle.nxv2f32(
     <vscale x 2 x float> undef,
-    <vscale x 2 x float>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 2 x float> %a
@@ -628,12 +628,12 @@ entry:
 
 declare <vscale x 2 x float> @llvm.riscv.vle.mask.nxv2f32(
   <vscale x 2 x float>,
-  <vscale x 2 x float>*,
+  ptr,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x float> @intrinsic_vle_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vle_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -642,7 +642,7 @@ define <vscale x 2 x float> @intrinsic_vle_mask_v_nxv2f32_nxv2f32(<vscale x 2 x
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vle.mask.nxv2f32(
     <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
+    ptr %1,
     <vscale x 2 x i1> %2,
     iXLen %3, iXLen 1)
 
@@ -651,10 +651,10 @@ entry:
 
 declare <vscale x 4 x float> @llvm.riscv.vle.nxv4f32(
   <vscale x 4 x float>,
-  <vscale x 4 x float>*,
+  ptr,
   iXLen);
 
-define <vscale x 4 x float> @intrinsic_vle_v_nxv4f32_nxv4f32(<vscale x 4 x float>* %0, iXLen %1) nounwind {
+define <vscale x 4 x float> @intrinsic_vle_v_nxv4f32_nxv4f32(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -663,7 +663,7 @@ define <vscale x 4 x float> @intrinsic_vle_v_nxv4f32_nxv4f32(<vscale x 4 x float
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vle.nxv4f32(
     <vscale x 4 x float> undef,
-    <vscale x 4 x float>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 4 x float> %a
@@ -671,12 +671,12 @@ entry:
 
 declare <vscale x 4 x float> @llvm.riscv.vle.mask.nxv4f32(
   <vscale x 4 x float>,
-  <vscale x 4 x float>*,
+  ptr,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x float> @intrinsic_vle_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+define <vscale x 4 x float> @intrinsic_vle_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -685,7 +685,7 @@ define <vscale x 4 x float> @intrinsic_vle_mask_v_nxv4f32_nxv4f32(<vscale x 4 x
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vle.mask.nxv4f32(
     <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
+    ptr %1,
     <vscale x 4 x i1> %2,
     iXLen %3, iXLen 1)
 
@@ -694,10 +694,10 @@ entry:
 
 declare <vscale x 8 x float> @llvm.riscv.vle.nxv8f32(
   <vscale x 8 x float>,
-  <vscale x 8 x float>*,
+  ptr,
   iXLen);
 
-define <vscale x 8 x float> @intrinsic_vle_v_nxv8f32_nxv8f32(<vscale x 8 x float>* %0, iXLen %1) nounwind {
+define <vscale x 8 x float> @intrinsic_vle_v_nxv8f32_nxv8f32(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -706,7 +706,7 @@ define <vscale x 8 x float> @intrinsic_vle_v_nxv8f32_nxv8f32(<vscale x 8 x float
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vle.nxv8f32(
     <vscale x 8 x float> undef,
-    <vscale x 8 x float>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 8 x float> %a
@@ -714,12 +714,12 @@ entry:
 
 declare <vscale x 8 x float> @llvm.riscv.vle.mask.nxv8f32(
   <vscale x 8 x float>,
-  <vscale x 8 x float>*,
+  ptr,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x float> @intrinsic_vle_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+define <vscale x 8 x float> @intrinsic_vle_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -728,7 +728,7 @@ define <vscale x 8 x float> @intrinsic_vle_mask_v_nxv8f32_nxv8f32(<vscale x 8 x
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vle.mask.nxv8f32(
     <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
+    ptr %1,
     <vscale x 8 x i1> %2,
     iXLen %3, iXLen 1)
 
@@ -737,10 +737,10 @@ entry:
 
 declare <vscale x 16 x float> @llvm.riscv.vle.nxv16f32(
   <vscale x 16 x float>,
-  <vscale x 16 x float>*,
+  ptr,
   iXLen);
 
-define <vscale x 16 x float> @intrinsic_vle_v_nxv16f32_nxv16f32(<vscale x 16 x float>* %0, iXLen %1) nounwind {
+define <vscale x 16 x float> @intrinsic_vle_v_nxv16f32_nxv16f32(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -749,7 +749,7 @@ define <vscale x 16 x float> @intrinsic_vle_v_nxv16f32_nxv16f32(<vscale x 16 x f
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vle.nxv16f32(
     <vscale x 16 x float> undef,
-    <vscale x 16 x float>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 16 x float> %a
@@ -757,12 +757,12 @@ entry:
 
 declare <vscale x 16 x float> @llvm.riscv.vle.mask.nxv16f32(
   <vscale x 16 x float>,
-  <vscale x 16 x float>*,
+  ptr,
   <vscale x 16 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 16 x float> @intrinsic_vle_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+define <vscale x 16 x float> @intrinsic_vle_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -771,7 +771,7 @@ define <vscale x 16 x float> @intrinsic_vle_mask_v_nxv16f32_nxv16f32(<vscale x 1
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vle.mask.nxv16f32(
     <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
+    ptr %1,
     <vscale x 16 x i1> %2,
     iXLen %3, iXLen 1)
 
@@ -780,10 +780,10 @@ entry:
 
 declare <vscale x 1 x i16> @llvm.riscv.vle.nxv1i16(
   <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
+  ptr,
   iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vle_v_nxv1i16_nxv1i16(<vscale x 1 x i16>* %0, iXLen %1) nounwind {
+define <vscale x 1 x i16> @intrinsic_vle_v_nxv1i16_nxv1i16(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -792,7 +792,7 @@ define <vscale x 1 x i16> @intrinsic_vle_v_nxv1i16_nxv1i16(<vscale x 1 x i16>* %
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vle.nxv1i16(
     <vscale x 1 x i16> undef,
-    <vscale x 1 x i16>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 1 x i16> %a
@@ -800,12 +800,12 @@ entry:
 
 declare <vscale x 1 x i16> @llvm.riscv.vle.mask.nxv1i16(
   <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
+  ptr,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vle_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vle_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -814,7 +814,7 @@ define <vscale x 1 x i16> @intrinsic_vle_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i1
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vle.mask.nxv1i16(
     <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
+    ptr %1,
     <vscale x 1 x i1> %2,
     iXLen %3, iXLen 1)
 
@@ -823,10 +823,10 @@ entry:
 
 declare <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16(
   <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
+  ptr,
   iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vle_v_nxv2i16_nxv2i16(<vscale x 2 x i16>* %0, iXLen %1) nounwind {
+define <vscale x 2 x i16> @intrinsic_vle_v_nxv2i16_nxv2i16(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -835,7 +835,7 @@ define <vscale x 2 x i16> @intrinsic_vle_v_nxv2i16_nxv2i16(<vscale x 2 x i16>* %
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16(
     <vscale x 2 x i16> undef,
-    <vscale x 2 x i16>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 2 x i16> %a
@@ -843,12 +843,12 @@ entry:
 
 declare <vscale x 2 x i16> @llvm.riscv.vle.mask.nxv2i16(
   <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
+  ptr,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vle_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vle_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -857,7 +857,7 @@ define <vscale x 2 x i16> @intrinsic_vle_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i1
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vle.mask.nxv2i16(
     <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
+    ptr %1,
     <vscale x 2 x i1> %2,
     iXLen %3, iXLen 1)
 
@@ -866,10 +866,10 @@ entry:
 
 declare <vscale x 4 x i16> @llvm.riscv.vle.nxv4i16(
   <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
+  ptr,
   iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vle_v_nxv4i16_nxv4i16(<vscale x 4 x i16>* %0, iXLen %1) nounwind {
+define <vscale x 4 x i16> @intrinsic_vle_v_nxv4i16_nxv4i16(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -878,7 +878,7 @@ define <vscale x 4 x i16> @intrinsic_vle_v_nxv4i16_nxv4i16(<vscale x 4 x i16>* %
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vle.nxv4i16(
     <vscale x 4 x i16> undef,
-    <vscale x 4 x i16>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 4 x i16> %a
@@ -886,12 +886,12 @@ entry:
 
 declare <vscale x 4 x i16> @llvm.riscv.vle.mask.nxv4i16(
   <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
+  ptr,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vle_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vle_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -900,7 +900,7 @@ define <vscale x 4 x i16> @intrinsic_vle_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i1
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vle.mask.nxv4i16(
     <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
+    ptr %1,
     <vscale x 4 x i1> %2,
     iXLen %3, iXLen 1)
 
@@ -909,10 +909,10 @@ entry:
 
 declare <vscale x 8 x i16> @llvm.riscv.vle.nxv8i16(
   <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
+  ptr,
   iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vle_v_nxv8i16_nxv8i16(<vscale x 8 x i16>* %0, iXLen %1) nounwind {
+define <vscale x 8 x i16> @intrinsic_vle_v_nxv8i16_nxv8i16(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -921,7 +921,7 @@ define <vscale x 8 x i16> @intrinsic_vle_v_nxv8i16_nxv8i16(<vscale x 8 x i16>* %
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vle.nxv8i16(
     <vscale x 8 x i16> undef,
-    <vscale x 8 x i16>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 8 x i16> %a
@@ -929,12 +929,12 @@ entry:
 
 declare <vscale x 8 x i16> @llvm.riscv.vle.mask.nxv8i16(
   <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
+  ptr,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vle_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vle_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -943,7 +943,7 @@ define <vscale x 8 x i16> @intrinsic_vle_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i1
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vle.mask.nxv8i16(
     <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
+    ptr %1,
     <vscale x 8 x i1> %2,
     iXLen %3, iXLen 1)
 
@@ -952,10 +952,10 @@ entry:
 
 declare <vscale x 16 x i16> @llvm.riscv.vle.nxv16i16(
   <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
+  ptr,
   iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vle_v_nxv16i16_nxv16i16(<vscale x 16 x i16>* %0, iXLen %1) nounwind {
+define <vscale x 16 x i16> @intrinsic_vle_v_nxv16i16_nxv16i16(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -964,7 +964,7 @@ define <vscale x 16 x i16> @intrinsic_vle_v_nxv16i16_nxv16i16(<vscale x 16 x i16
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vle.nxv16i16(
     <vscale x 16 x i16> undef,
-    <vscale x 16 x i16>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 16 x i16> %a
@@ -972,12 +972,12 @@ entry:
 
 declare <vscale x 16 x i16> @llvm.riscv.vle.mask.nxv16i16(
   <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
+  ptr,
   <vscale x 16 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vle_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vle_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -986,7 +986,7 @@ define <vscale x 16 x i16> @intrinsic_vle_mask_v_nxv16i16_nxv16i16(<vscale x 16
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vle.mask.nxv16i16(
     <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
+    ptr %1,
     <vscale x 16 x i1> %2,
     iXLen %3, iXLen 1)
 
@@ -995,10 +995,10 @@ entry:
 
 declare <vscale x 32 x i16> @llvm.riscv.vle.nxv32i16(
   <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
+  ptr,
   iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vle_v_nxv32i16_nxv32i16(<vscale x 32 x i16>* %0, iXLen %1) nounwind {
+define <vscale x 32 x i16> @intrinsic_vle_v_nxv32i16_nxv32i16(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -1007,7 +1007,7 @@ define <vscale x 32 x i16> @intrinsic_vle_v_nxv32i16_nxv32i16(<vscale x 32 x i16
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vle.nxv32i16(
     <vscale x 32 x i16> undef,
-    <vscale x 32 x i16>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 32 x i16> %a
@@ -1015,12 +1015,12 @@ entry:
 
 declare <vscale x 32 x i16> @llvm.riscv.vle.mask.nxv32i16(
   <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
+  ptr,
   <vscale x 32 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vle_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+define <vscale x 32 x i16> @intrinsic_vle_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -1029,7 +1029,7 @@ define <vscale x 32 x i16> @intrinsic_vle_mask_v_nxv32i16_nxv32i16(<vscale x 32
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vle.mask.nxv32i16(
     <vscale x 32 x i16> %0,
-    <vscale x 32 x i16>* %1,
+    ptr %1,
     <vscale x 32 x i1> %2,
     iXLen %3, iXLen 1)
 
@@ -1038,10 +1038,10 @@ entry:
 
 declare <vscale x 1 x half> @llvm.riscv.vle.nxv1f16(
   <vscale x 1 x half>,
-  <vscale x 1 x half>*,
+  ptr,
   iXLen);
 
-define <vscale x 1 x half> @intrinsic_vle_v_nxv1f16_nxv1f16(<vscale x 1 x half>* %0, iXLen %1) nounwind {
+define <vscale x 1 x half> @intrinsic_vle_v_nxv1f16_nxv1f16(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1050,7 +1050,7 @@ define <vscale x 1 x half> @intrinsic_vle_v_nxv1f16_nxv1f16(<vscale x 1 x half>*
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vle.nxv1f16(
     <vscale x 1 x half> undef,
-    <vscale x 1 x half>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 1 x half> %a
@@ -1058,12 +1058,12 @@ entry:
 
 declare <vscale x 1 x half> @llvm.riscv.vle.mask.nxv1f16(
   <vscale x 1 x half>,
-  <vscale x 1 x half>*,
+  ptr,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x half> @intrinsic_vle_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+define <vscale x 1 x half> @intrinsic_vle_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1072,7 +1072,7 @@ define <vscale x 1 x half> @intrinsic_vle_mask_v_nxv1f16_nxv1f16(<vscale x 1 x h
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vle.mask.nxv1f16(
     <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
+    ptr %1,
     <vscale x 1 x i1> %2,
     iXLen %3, iXLen 1)
 
@@ -1081,10 +1081,10 @@ entry:
 
 declare <vscale x 2 x half> @llvm.riscv.vle.nxv2f16(
   <vscale x 2 x half>,
-  <vscale x 2 x half>*,
+  ptr,
   iXLen);
 
-define <vscale x 2 x half> @intrinsic_vle_v_nxv2f16_nxv2f16(<vscale x 2 x half>* %0, iXLen %1) nounwind {
+define <vscale x 2 x half> @intrinsic_vle_v_nxv2f16_nxv2f16(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1093,7 +1093,7 @@ define <vscale x 2 x half> @intrinsic_vle_v_nxv2f16_nxv2f16(<vscale x 2 x half>*
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vle.nxv2f16(
     <vscale x 2 x half> undef,
-    <vscale x 2 x half>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 2 x half> %a
@@ -1101,12 +1101,12 @@ entry:
 
 declare <vscale x 2 x half> @llvm.riscv.vle.mask.nxv2f16(
   <vscale x 2 x half>,
-  <vscale x 2 x half>*,
+  ptr,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x half> @intrinsic_vle_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+define <vscale x 2 x half> @intrinsic_vle_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1115,7 +1115,7 @@ define <vscale x 2 x half> @intrinsic_vle_mask_v_nxv2f16_nxv2f16(<vscale x 2 x h
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vle.mask.nxv2f16(
     <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
+    ptr %1,
     <vscale x 2 x i1> %2,
     iXLen %3, iXLen 1)
 
@@ -1124,10 +1124,10 @@ entry:
 
 declare <vscale x 4 x half> @llvm.riscv.vle.nxv4f16(
   <vscale x 4 x half>,
-  <vscale x 4 x half>*,
+  ptr,
   iXLen);
 
-define <vscale x 4 x half> @intrinsic_vle_v_nxv4f16_nxv4f16(<vscale x 4 x half>* %0, iXLen %1) nounwind {
+define <vscale x 4 x half> @intrinsic_vle_v_nxv4f16_nxv4f16(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1136,7 +1136,7 @@ define <vscale x 4 x half> @intrinsic_vle_v_nxv4f16_nxv4f16(<vscale x 4 x half>*
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vle.nxv4f16(
     <vscale x 4 x half> undef,
-    <vscale x 4 x half>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 4 x half> %a
@@ -1144,12 +1144,12 @@ entry:
 
 declare <vscale x 4 x half> @llvm.riscv.vle.mask.nxv4f16(
   <vscale x 4 x half>,
-  <vscale x 4 x half>*,
+  ptr,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x half> @intrinsic_vle_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vle_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1158,7 +1158,7 @@ define <vscale x 4 x half> @intrinsic_vle_mask_v_nxv4f16_nxv4f16(<vscale x 4 x h
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vle.mask.nxv4f16(
     <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
+    ptr %1,
     <vscale x 4 x i1> %2,
     iXLen %3, iXLen 1)
 
@@ -1167,10 +1167,10 @@ entry:
 
 declare <vscale x 8 x half> @llvm.riscv.vle.nxv8f16(
   <vscale x 8 x half>,
-  <vscale x 8 x half>*,
+  ptr,
   iXLen);
 
-define <vscale x 8 x half> @intrinsic_vle_v_nxv8f16_nxv8f16(<vscale x 8 x half>* %0, iXLen %1) nounwind {
+define <vscale x 8 x half> @intrinsic_vle_v_nxv8f16_nxv8f16(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1179,7 +1179,7 @@ define <vscale x 8 x half> @intrinsic_vle_v_nxv8f16_nxv8f16(<vscale x 8 x half>*
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vle.nxv8f16(
     <vscale x 8 x half> undef,
-    <vscale x 8 x half>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 8 x half> %a
@@ -1187,12 +1187,12 @@ entry:
 
 declare <vscale x 8 x half> @llvm.riscv.vle.mask.nxv8f16(
   <vscale x 8 x half>,
-  <vscale x 8 x half>*,
+  ptr,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x half> @intrinsic_vle_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+define <vscale x 8 x half> @intrinsic_vle_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1201,7 +1201,7 @@ define <vscale x 8 x half> @intrinsic_vle_mask_v_nxv8f16_nxv8f16(<vscale x 8 x h
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vle.mask.nxv8f16(
     <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
+    ptr %1,
     <vscale x 8 x i1> %2,
     iXLen %3, iXLen 1)
 
@@ -1210,10 +1210,10 @@ entry:
 
 declare <vscale x 16 x half> @llvm.riscv.vle.nxv16f16(
   <vscale x 16 x half>,
-  <vscale x 16 x half>*,
+  ptr,
   iXLen);
 
-define <vscale x 16 x half> @intrinsic_vle_v_nxv16f16_nxv16f16(<vscale x 16 x half>* %0, iXLen %1) nounwind {
+define <vscale x 16 x half> @intrinsic_vle_v_nxv16f16_nxv16f16(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1222,7 +1222,7 @@ define <vscale x 16 x half> @intrinsic_vle_v_nxv16f16_nxv16f16(<vscale x 16 x ha
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vle.nxv16f16(
     <vscale x 16 x half> undef,
-    <vscale x 16 x half>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 16 x half> %a
@@ -1230,12 +1230,12 @@ entry:
 
 declare <vscale x 16 x half> @llvm.riscv.vle.mask.nxv16f16(
   <vscale x 16 x half>,
-  <vscale x 16 x half>*,
+  ptr,
   <vscale x 16 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 16 x half> @intrinsic_vle_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+define <vscale x 16 x half> @intrinsic_vle_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1244,7 +1244,7 @@ define <vscale x 16 x half> @intrinsic_vle_mask_v_nxv16f16_nxv16f16(<vscale x 16
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vle.mask.nxv16f16(
     <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
+    ptr %1,
     <vscale x 16 x i1> %2,
     iXLen %3, iXLen 1)
 
@@ -1253,10 +1253,10 @@ entry:
 
 declare <vscale x 32 x half> @llvm.riscv.vle.nxv32f16(
   <vscale x 32 x half>,
-  <vscale x 32 x half>*,
+  ptr,
   iXLen);
 
-define <vscale x 32 x half> @intrinsic_vle_v_nxv32f16_nxv32f16(<vscale x 32 x half>* %0, iXLen %1) nounwind {
+define <vscale x 32 x half> @intrinsic_vle_v_nxv32f16_nxv32f16(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -1265,7 +1265,7 @@ define <vscale x 32 x half> @intrinsic_vle_v_nxv32f16_nxv32f16(<vscale x 32 x ha
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vle.nxv32f16(
     <vscale x 32 x half> undef,
-    <vscale x 32 x half>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 32 x half> %a
@@ -1273,12 +1273,12 @@ entry:
 
 declare <vscale x 32 x half> @llvm.riscv.vle.mask.nxv32f16(
   <vscale x 32 x half>,
-  <vscale x 32 x half>*,
+  ptr,
   <vscale x 32 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 32 x half> @intrinsic_vle_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+define <vscale x 32 x half> @intrinsic_vle_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -1287,7 +1287,7 @@ define <vscale x 32 x half> @intrinsic_vle_mask_v_nxv32f16_nxv32f16(<vscale x 32
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vle.mask.nxv32f16(
     <vscale x 32 x half> %0,
-    <vscale x 32 x half>* %1,
+    ptr %1,
     <vscale x 32 x i1> %2,
     iXLen %3, iXLen 1)
 
@@ -1296,10 +1296,10 @@ entry:
 
 declare <vscale x 1 x bfloat> @llvm.riscv.vle.nxv1bf16(
   <vscale x 1 x bfloat>,
-  <vscale x 1 x bfloat>*,
+  ptr,
   iXLen);
 
-define <vscale x 1 x bfloat> @intrinsic_vle_v_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat>* %0, iXLen %1) nounwind {
+define <vscale x 1 x bfloat> @intrinsic_vle_v_nxv1bf16_nxv1bf16(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv1bf16_nxv1bf16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1308,7 +1308,7 @@ define <vscale x 1 x bfloat> @intrinsic_vle_v_nxv1bf16_nxv1bf16(<vscale x 1 x bf
 entry:
   %a = call <vscale x 1 x bfloat> @llvm.riscv.vle.nxv1bf16(
     <vscale x 1 x bfloat> undef,
-    <vscale x 1 x bfloat>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 1 x bfloat> %a
@@ -1316,12 +1316,12 @@ entry:
 
 declare <vscale x 1 x bfloat> @llvm.riscv.vle.mask.nxv1bf16(
   <vscale x 1 x bfloat>,
-  <vscale x 1 x bfloat>*,
+  ptr,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x bfloat> @intrinsic_vle_mask_v_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+define <vscale x 1 x bfloat> @intrinsic_vle_mask_v_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1bf16_nxv1bf16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1330,7 +1330,7 @@ define <vscale x 1 x bfloat> @intrinsic_vle_mask_v_nxv1bf16_nxv1bf16(<vscale x 1
 entry:
   %a = call <vscale x 1 x bfloat> @llvm.riscv.vle.mask.nxv1bf16(
     <vscale x 1 x bfloat> %0,
-    <vscale x 1 x bfloat>* %1,
+    ptr %1,
     <vscale x 1 x i1> %2,
     iXLen %3, iXLen 1)
 
@@ -1339,10 +1339,10 @@ entry:
 
 declare <vscale x 2 x bfloat> @llvm.riscv.vle.nxv2bf16(
   <vscale x 2 x bfloat>,
-  <vscale x 2 x bfloat>*,
+  ptr,
   iXLen);
 
-define <vscale x 2 x bfloat> @intrinsic_vle_v_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat>* %0, iXLen %1) nounwind {
+define <vscale x 2 x bfloat> @intrinsic_vle_v_nxv2bf16_nxv2bf16(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv2bf16_nxv2bf16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1351,7 +1351,7 @@ define <vscale x 2 x bfloat> @intrinsic_vle_v_nxv2bf16_nxv2bf16(<vscale x 2 x bf
 entry:
   %a = call <vscale x 2 x bfloat> @llvm.riscv.vle.nxv2bf16(
     <vscale x 2 x bfloat> undef,
-    <vscale x 2 x bfloat>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 2 x bfloat> %a
@@ -1359,12 +1359,12 @@ entry:
 
 declare <vscale x 2 x bfloat> @llvm.riscv.vle.mask.nxv2bf16(
   <vscale x 2 x bfloat>,
-  <vscale x 2 x bfloat>*,
+  ptr,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x bfloat> @intrinsic_vle_mask_v_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat>* %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+define <vscale x 2 x bfloat> @intrinsic_vle_mask_v_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2bf16_nxv2bf16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1373,7 +1373,7 @@ define <vscale x 2 x bfloat> @intrinsic_vle_mask_v_nxv2bf16_nxv2bf16(<vscale x 2
 entry:
   %a = call <vscale x 2 x bfloat> @llvm.riscv.vle.mask.nxv2bf16(
     <vscale x 2 x bfloat> %0,
-    <vscale x 2 x bfloat>* %1,
+    ptr %1,
     <vscale x 2 x i1> %2,
     iXLen %3, iXLen 1)
 
@@ -1382,10 +1382,10 @@ entry:
 
 declare <vscale x 4 x bfloat> @llvm.riscv.vle.nxv4bf16(
   <vscale x 4 x bfloat>,
-  <vscale x 4 x bfloat>*,
+  ptr,
   iXLen);
 
-define <vscale x 4 x bfloat> @intrinsic_vle_v_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat>* %0, iXLen %1) nounwind {
+define <vscale x 4 x bfloat> @intrinsic_vle_v_nxv4bf16_nxv4bf16(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv4bf16_nxv4bf16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1394,7 +1394,7 @@ define <vscale x 4 x bfloat> @intrinsic_vle_v_nxv4bf16_nxv4bf16(<vscale x 4 x bf
 entry:
   %a = call <vscale x 4 x bfloat> @llvm.riscv.vle.nxv4bf16(
     <vscale x 4 x bfloat> undef,
-    <vscale x 4 x bfloat>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 4 x bfloat> %a
@@ -1402,12 +1402,12 @@ entry:
 
 declare <vscale x 4 x bfloat> @llvm.riscv.vle.mask.nxv4bf16(
   <vscale x 4 x bfloat>,
-  <vscale x 4 x bfloat>*,
+  ptr,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x bfloat> @intrinsic_vle_mask_v_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat>* %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+define <vscale x 4 x bfloat> @intrinsic_vle_mask_v_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4bf16_nxv4bf16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1416,7 +1416,7 @@ define <vscale x 4 x bfloat> @intrinsic_vle_mask_v_nxv4bf16_nxv4bf16(<vscale x 4
 entry:
   %a = call <vscale x 4 x bfloat> @llvm.riscv.vle.mask.nxv4bf16(
     <vscale x 4 x bfloat> %0,
-    <vscale x 4 x bfloat>* %1,
+    ptr %1,
     <vscale x 4 x i1> %2,
     iXLen %3, iXLen 1)
 
@@ -1425,10 +1425,10 @@ entry:
 
 declare <vscale x 8 x bfloat> @llvm.riscv.vle.nxv8bf16(
   <vscale x 8 x bfloat>,
-  <vscale x 8 x bfloat>*,
+  ptr,
   iXLen);
 
-define <vscale x 8 x bfloat> @intrinsic_vle_v_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat>* %0, iXLen %1) nounwind {
+define <vscale x 8 x bfloat> @intrinsic_vle_v_nxv8bf16_nxv8bf16(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv8bf16_nxv8bf16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1437,7 +1437,7 @@ define <vscale x 8 x bfloat> @intrinsic_vle_v_nxv8bf16_nxv8bf16(<vscale x 8 x bf
 entry:
   %a = call <vscale x 8 x bfloat> @llvm.riscv.vle.nxv8bf16(
     <vscale x 8 x bfloat> undef,
-    <vscale x 8 x bfloat>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 8 x bfloat> %a
@@ -1445,12 +1445,12 @@ entry:
 
 declare <vscale x 8 x bfloat> @llvm.riscv.vle.mask.nxv8bf16(
   <vscale x 8 x bfloat>,
-  <vscale x 8 x bfloat>*,
+  ptr,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x bfloat> @intrinsic_vle_mask_v_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat>* %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+define <vscale x 8 x bfloat> @intrinsic_vle_mask_v_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8bf16_nxv8bf16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1459,7 +1459,7 @@ define <vscale x 8 x bfloat> @intrinsic_vle_mask_v_nxv8bf16_nxv8bf16(<vscale x 8
 entry:
   %a = call <vscale x 8 x bfloat> @llvm.riscv.vle.mask.nxv8bf16(
     <vscale x 8 x bfloat> %0,
-    <vscale x 8 x bfloat>* %1,
+    ptr %1,
     <vscale x 8 x i1> %2,
     iXLen %3, iXLen 1)
 
@@ -1468,10 +1468,10 @@ entry:
 
 declare <vscale x 16 x bfloat> @llvm.riscv.vle.nxv16bf16(
   <vscale x 16 x bfloat>,
-  <vscale x 16 x bfloat>*,
+  ptr,
   iXLen);
 
-define <vscale x 16 x bfloat> @intrinsic_vle_v_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat>* %0, iXLen %1) nounwind {
+define <vscale x 16 x bfloat> @intrinsic_vle_v_nxv16bf16_nxv16bf16(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv16bf16_nxv16bf16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1480,7 +1480,7 @@ define <vscale x 16 x bfloat> @intrinsic_vle_v_nxv16bf16_nxv16bf16(<vscale x 16
 entry:
   %a = call <vscale x 16 x bfloat> @llvm.riscv.vle.nxv16bf16(
     <vscale x 16 x bfloat> undef,
-    <vscale x 16 x bfloat>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 16 x bfloat> %a
@@ -1488,12 +1488,12 @@ entry:
 
 declare <vscale x 16 x bfloat> @llvm.riscv.vle.mask.nxv16bf16(
   <vscale x 16 x bfloat>,
-  <vscale x 16 x bfloat>*,
+  ptr,
   <vscale x 16 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 16 x bfloat> @intrinsic_vle_mask_v_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat>* %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+define <vscale x 16 x bfloat> @intrinsic_vle_mask_v_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16bf16_nxv16bf16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1502,7 +1502,7 @@ define <vscale x 16 x bfloat> @intrinsic_vle_mask_v_nxv16bf16_nxv16bf16(<vscale
 entry:
   %a = call <vscale x 16 x bfloat> @llvm.riscv.vle.mask.nxv16bf16(
     <vscale x 16 x bfloat> %0,
-    <vscale x 16 x bfloat>* %1,
+    ptr %1,
     <vscale x 16 x i1> %2,
     iXLen %3, iXLen 1)
 
@@ -1511,10 +1511,10 @@ entry:
 
 declare <vscale x 32 x bfloat> @llvm.riscv.vle.nxv32bf16(
   <vscale x 32 x bfloat>,
-  <vscale x 32 x bfloat>*,
+  ptr,
   iXLen);
 
-define <vscale x 32 x bfloat> @intrinsic_vle_v_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat>* %0, iXLen %1) nounwind {
+define <vscale x 32 x bfloat> @intrinsic_vle_v_nxv32bf16_nxv32bf16(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv32bf16_nxv32bf16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -1523,7 +1523,7 @@ define <vscale x 32 x bfloat> @intrinsic_vle_v_nxv32bf16_nxv32bf16(<vscale x 32
 entry:
   %a = call <vscale x 32 x bfloat> @llvm.riscv.vle.nxv32bf16(
     <vscale x 32 x bfloat> undef,
-    <vscale x 32 x bfloat>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 32 x bfloat> %a
@@ -1531,12 +1531,12 @@ entry:
 
 declare <vscale x 32 x bfloat> @llvm.riscv.vle.mask.nxv32bf16(
   <vscale x 32 x bfloat>,
-  <vscale x 32 x bfloat>*,
+  ptr,
   <vscale x 32 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 32 x bfloat> @intrinsic_vle_mask_v_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat>* %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+define <vscale x 32 x bfloat> @intrinsic_vle_mask_v_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, ptr %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv32bf16_nxv32bf16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -1545,7 +1545,7 @@ define <vscale x 32 x bfloat> @intrinsic_vle_mask_v_nxv32bf16_nxv32bf16(<vscale
 entry:
   %a = call <vscale x 32 x bfloat> @llvm.riscv.vle.mask.nxv32bf16(
     <vscale x 32 x bfloat> %0,
-    <vscale x 32 x bfloat>* %1,
+    ptr %1,
     <vscale x 32 x i1> %2,
     iXLen %3, iXLen 1)
 
@@ -1554,10 +1554,10 @@ entry:
 
 declare <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vle_v_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0, iXLen %1) nounwind {
+define <vscale x 1 x i8> @intrinsic_vle_v_nxv1i8_nxv1i8(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -1566,7 +1566,7 @@ define <vscale x 1 x i8> @intrinsic_vle_v_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0, i
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8(
     <vscale x 1 x i8> undef,
-    <vscale x 1 x i8>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 1 x i8> %a
@@ -1574,12 +1574,12 @@ entry:
 
 declare <vscale x 1 x i8> @llvm.riscv.vle.mask.nxv1i8(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vle_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vle_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -1588,7 +1588,7 @@ define <vscale x 1 x i8> @intrinsic_vle_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vle.mask.nxv1i8(
     <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
+    ptr %1,
     <vscale x 1 x i1> %2,
     iXLen %3, iXLen 1)
 
@@ -1597,10 +1597,10 @@ entry:
 
 declare <vscale x 2 x i8> @llvm.riscv.vle.nxv2i8(
   <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
+  ptr,
   iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vle_v_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0, iXLen %1) nounwind {
+define <vscale x 2 x i8> @intrinsic_vle_v_nxv2i8_nxv2i8(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -1609,7 +1609,7 @@ define <vscale x 2 x i8> @intrinsic_vle_v_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0, i
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vle.nxv2i8(
     <vscale x 2 x i8> undef,
-    <vscale x 2 x i8>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 2 x i8> %a
@@ -1617,12 +1617,12 @@ entry:
 
 declare <vscale x 2 x i8> @llvm.riscv.vle.mask.nxv2i8(
   <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
+  ptr,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vle_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+define <vscale x 2 x i8> @intrinsic_vle_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -1631,7 +1631,7 @@ define <vscale x 2 x i8> @intrinsic_vle_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vle.mask.nxv2i8(
     <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
+    ptr %1,
     <vscale x 2 x i1> %2,
     iXLen %3, iXLen 1)
 
@@ -1640,10 +1640,10 @@ entry:
 
 declare <vscale x 4 x i8> @llvm.riscv.vle.nxv4i8(
   <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
+  ptr,
   iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vle_v_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0, iXLen %1) nounwind {
+define <vscale x 4 x i8> @intrinsic_vle_v_nxv4i8_nxv4i8(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -1652,7 +1652,7 @@ define <vscale x 4 x i8> @intrinsic_vle_v_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0, i
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vle.nxv4i8(
     <vscale x 4 x i8> undef,
-    <vscale x 4 x i8>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 4 x i8> %a
@@ -1660,12 +1660,12 @@ entry:
 
 declare <vscale x 4 x i8> @llvm.riscv.vle.mask.nxv4i8(
   <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
+  ptr,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vle_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+define <vscale x 4 x i8> @intrinsic_vle_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -1674,7 +1674,7 @@ define <vscale x 4 x i8> @intrinsic_vle_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vle.mask.nxv4i8(
     <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
+    ptr %1,
     <vscale x 4 x i1> %2,
     iXLen %3, iXLen 1)
 
@@ -1683,10 +1683,10 @@ entry:
 
 declare <vscale x 8 x i8> @llvm.riscv.vle.nxv8i8(
   <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
+  ptr,
   iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vle_v_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0, iXLen %1) nounwind {
+define <vscale x 8 x i8> @intrinsic_vle_v_nxv8i8_nxv8i8(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -1695,7 +1695,7 @@ define <vscale x 8 x i8> @intrinsic_vle_v_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0, i
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vle.nxv8i8(
     <vscale x 8 x i8> undef,
-    <vscale x 8 x i8>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 8 x i8> %a
@@ -1703,12 +1703,12 @@ entry:
 
 declare <vscale x 8 x i8> @llvm.riscv.vle.mask.nxv8i8(
   <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
+  ptr,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vle_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+define <vscale x 8 x i8> @intrinsic_vle_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -1717,7 +1717,7 @@ define <vscale x 8 x i8> @intrinsic_vle_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vle.mask.nxv8i8(
     <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
+    ptr %1,
     <vscale x 8 x i1> %2,
     iXLen %3, iXLen 1)
 
@@ -1726,10 +1726,10 @@ entry:
 
 declare <vscale x 16 x i8> @llvm.riscv.vle.nxv16i8(
   <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
+  ptr,
   iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vle_v_nxv16i8_nxv16i8(<vscale x 16 x i8>* %0, iXLen %1) nounwind {
+define <vscale x 16 x i8> @intrinsic_vle_v_nxv16i8_nxv16i8(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -1738,7 +1738,7 @@ define <vscale x 16 x i8> @intrinsic_vle_v_nxv16i8_nxv16i8(<vscale x 16 x i8>* %
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vle.nxv16i8(
     <vscale x 16 x i8> undef,
-    <vscale x 16 x i8>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 16 x i8> %a
@@ -1746,12 +1746,12 @@ entry:
 
 declare <vscale x 16 x i8> @llvm.riscv.vle.mask.nxv16i8(
   <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
+  ptr,
   <vscale x 16 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vle_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+define <vscale x 16 x i8> @intrinsic_vle_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -1760,7 +1760,7 @@ define <vscale x 16 x i8> @intrinsic_vle_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vle.mask.nxv16i8(
     <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
+    ptr %1,
     <vscale x 16 x i1> %2,
     iXLen %3, iXLen 1)
 
@@ -1769,10 +1769,10 @@ entry:
 
 declare <vscale x 32 x i8> @llvm.riscv.vle.nxv32i8(
   <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
+  ptr,
   iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vle_v_nxv32i8_nxv32i8(<vscale x 32 x i8>* %0, iXLen %1) nounwind {
+define <vscale x 32 x i8> @intrinsic_vle_v_nxv32i8_nxv32i8(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -1781,7 +1781,7 @@ define <vscale x 32 x i8> @intrinsic_vle_v_nxv32i8_nxv32i8(<vscale x 32 x i8>* %
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vle.nxv32i8(
     <vscale x 32 x i8> undef,
-    <vscale x 32 x i8>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 32 x i8> %a
@@ -1789,12 +1789,12 @@ entry:
 
 declare <vscale x 32 x i8> @llvm.riscv.vle.mask.nxv32i8(
   <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
+  ptr,
   <vscale x 32 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vle_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+define <vscale x 32 x i8> @intrinsic_vle_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -1803,7 +1803,7 @@ define <vscale x 32 x i8> @intrinsic_vle_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vle.mask.nxv32i8(
     <vscale x 32 x i8> %0,
-    <vscale x 32 x i8>* %1,
+    ptr %1,
     <vscale x 32 x i1> %2,
     iXLen %3, iXLen 1)
 
@@ -1812,10 +1812,10 @@ entry:
 
 declare <vscale x 64 x i8> @llvm.riscv.vle.nxv64i8(
   <vscale x 64 x i8>,
-  <vscale x 64 x i8>*,
+  ptr,
   iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vle_v_nxv64i8_nxv64i8(<vscale x 64 x i8>* %0, iXLen %1) nounwind {
+define <vscale x 64 x i8> @intrinsic_vle_v_nxv64i8_nxv64i8(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
@@ -1824,7 +1824,7 @@ define <vscale x 64 x i8> @intrinsic_vle_v_nxv64i8_nxv64i8(<vscale x 64 x i8>* %
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vle.nxv64i8(
     <vscale x 64 x i8> undef,
-    <vscale x 64 x i8>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 64 x i8> %a
@@ -1832,12 +1832,12 @@ entry:
 
 declare <vscale x 64 x i8> @llvm.riscv.vle.mask.nxv64i8(
   <vscale x 64 x i8>,
-  <vscale x 64 x i8>*,
+  ptr,
   <vscale x 64 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vle_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
+define <vscale x 64 x i8> @intrinsic_vle_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
@@ -1846,7 +1846,7 @@ define <vscale x 64 x i8> @intrinsic_vle_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vle.mask.nxv64i8(
     <vscale x 64 x i8> %0,
-    <vscale x 64 x i8>* %1,
+    ptr %1,
     <vscale x 64 x i1> %2,
     iXLen %3, iXLen 1)
 

diff --git a/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll b/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll
index f07a1e8997a2a..15cb42bacf173 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll
@@ -1,13 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+v -stop-after=finalize-isel -target-abi=lp64 < %s | FileCheck %s
 
-declare { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>*, i64)
-declare { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.mask.nxv8i8.i64(<vscale x 8 x i8>, <vscale x 8 x i8>*, <vscale x 8 x i1>, i64, i64 immarg)
+declare { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.nxv8i8(<vscale x 8 x i8>, ptr, i64)
+declare { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.mask.nxv8i8.i64(<vscale x 8 x i8>, ptr, <vscale x 8 x i1>, i64, i64 immarg)
 
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, i8* , i64)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i1>, i64, i64)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, ptr , i64)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i1>, i64, i64)
 
-define i64 @test_vleff_nxv8i8(<vscale x 8 x i8> *%p, i64 %vl) {
+define i64 @test_vleff_nxv8i8(ptr %p, i64 %vl) {
   ; CHECK-LABEL: name: test_vleff_nxv8i8
   ; CHECK: bb.0.entry:
   ; CHECK-NEXT:   liveins: $x10, $x11
@@ -18,12 +18,12 @@ define i64 @test_vleff_nxv8i8(<vscale x 8 x i8> *%p, i64 %vl) {
   ; CHECK-NEXT:   $x10 = COPY [[PseudoVLE8FF_V_M1_1]]
   ; CHECK-NEXT:   PseudoRET implicit $x10
 entry:
-  %0 = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8>* %p, i64 %vl)
+  %0 = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.nxv8i8(<vscale x 8 x i8> undef, ptr %p, i64 %vl)
   %1 = extractvalue { <vscale x 8 x i8>, i64 } %0, 1
   ret i64 %1
 }
 
-define i64 @test_vleff_nxv8i8_tu(<vscale x 8 x i8> %merge, <vscale x 8 x i8> *%p, i64 %vl) {
+define i64 @test_vleff_nxv8i8_tu(<vscale x 8 x i8> %merge, ptr %p, i64 %vl) {
   ; CHECK-LABEL: name: test_vleff_nxv8i8_tu
   ; CHECK: bb.0.entry:
   ; CHECK-NEXT:   liveins: $v8, $x10, $x11
@@ -35,12 +35,12 @@ define i64 @test_vleff_nxv8i8_tu(<vscale x 8 x i8> %merge, <vscale x 8 x i8> *%p
   ; CHECK-NEXT:   $x10 = COPY [[PseudoVLE8FF_V_M1_1]]
   ; CHECK-NEXT:   PseudoRET implicit $x10
 entry:
-  %0 = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.nxv8i8(<vscale x 8 x i8> %merge, <vscale x 8 x i8>* %p, i64 %vl)
+  %0 = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.nxv8i8(<vscale x 8 x i8> %merge, ptr %p, i64 %vl)
   %1 = extractvalue { <vscale x 8 x i8>, i64 } %0, 1
   ret i64 %1
 }
 
-define i64 @test_vleff_nxv8i8_mask(<vscale x 8 x i8> %maskedoff, <vscale x 8 x i8> *%p, <vscale x 8 x i1> %m, i64 %vl) {
+define i64 @test_vleff_nxv8i8_mask(<vscale x 8 x i8> %maskedoff, ptr %p, <vscale x 8 x i1> %m, i64 %vl) {
   ; CHECK-LABEL: name: test_vleff_nxv8i8_mask
   ; CHECK: bb.0.entry:
   ; CHECK-NEXT:   liveins: $v8, $x10, $v0, $x11
@@ -54,12 +54,12 @@ define i64 @test_vleff_nxv8i8_mask(<vscale x 8 x i8> %maskedoff, <vscale x 8 x i
   ; CHECK-NEXT:   $x10 = COPY [[PseudoVLE8FF_V_M1_MASK1]]
   ; CHECK-NEXT:   PseudoRET implicit $x10
 entry:
-  %0 = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.mask.nxv8i8.i64(<vscale x 8 x i8> %maskedoff, <vscale x 8 x i8> *%p, <vscale x 8 x i1> %m, i64 %vl, i64 0)
+  %0 = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.mask.nxv8i8.i64(<vscale x 8 x i8> %maskedoff, ptr %p, <vscale x 8 x i1> %m, i64 %vl, i64 0)
   %1 = extractvalue { <vscale x 8 x i8>, i64 } %0, 1
   ret i64 %1
 }
 
-define i64 @test_vlseg2ff_nxv8i8(i8* %base, i64 %vl, i64* %outvl) {
+define i64 @test_vlseg2ff_nxv8i8(ptr %base, i64 %vl, ptr %outvl) {
   ; CHECK-LABEL: name: test_vlseg2ff_nxv8i8
   ; CHECK: bb.0.entry:
   ; CHECK-NEXT:   liveins: $x10, $x11
@@ -75,12 +75,12 @@ define i64 @test_vlseg2ff_nxv8i8(i8* %base, i64 %vl, i64* %outvl) {
   ; CHECK-NEXT:   $x10 = COPY [[PseudoVLSEG2E8FF_V_M1_1]]
   ; CHECK-NEXT:   PseudoRET implicit $x10
 entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, i8* %base, i64 %vl)
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %vl)
   %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} %0, 2
   ret i64 %1
 }
 
-define i64 @test_vlseg2ff_nxv8i8_tu(<vscale x 8 x i8> %val, i8* %base, i64 %vl, i64* %outvl) {
+define i64 @test_vlseg2ff_nxv8i8_tu(<vscale x 8 x i8> %val, ptr %base, i64 %vl, ptr %outvl) {
   ; CHECK-LABEL: name: test_vlseg2ff_nxv8i8_tu
   ; CHECK: bb.0.entry:
   ; CHECK-NEXT:   liveins: $v8, $x10, $x11
@@ -93,12 +93,12 @@ define i64 @test_vlseg2ff_nxv8i8_tu(<vscale x 8 x i8> %val, i8* %base, i64 %vl,
   ; CHECK-NEXT:   $x10 = COPY [[PseudoVLSEG2E8FF_V_M1_1]]
   ; CHECK-NEXT:   PseudoRET implicit $x10
 entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.nxv8i8(<vscale x 8 x i8> %val, <vscale x 8 x i8> %val, i8* %base, i64 %vl)
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.nxv8i8(<vscale x 8 x i8> %val, <vscale x 8 x i8> %val, ptr %base, i64 %vl)
   %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} %0, 2
   ret i64 %1
 }
 
-define i64 @test_vlseg2ff_nxv8i8_mask(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i64 %vl, i64* %outvl) {
+define i64 @test_vlseg2ff_nxv8i8_mask(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, ptr %outvl) {
   ; CHECK-LABEL: name: test_vlseg2ff_nxv8i8_mask
   ; CHECK: bb.0.entry:
   ; CHECK-NEXT:   liveins: $v8, $x10, $v0, $x11
@@ -113,7 +113,7 @@ define i64 @test_vlseg2ff_nxv8i8_mask(<vscale x 8 x i8> %val, i8* %base, <vscale
   ; CHECK-NEXT:   $x10 = COPY [[PseudoVLSEG2E8FF_V_M1_MASK1]]
   ; CHECK-NEXT:   PseudoRET implicit $x10
 entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8(<vscale x 8 x i8> %val, <vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i64 %vl, i64 0)
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8(<vscale x 8 x i8> %val, <vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 0)
   %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} %0, 2
   ret i64 %1
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vleff.ll b/llvm/test/CodeGen/RISCV/rvv/vleff.ll
index c9fdaa9b8bcbb..7a8ed4153c352 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vleff.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vleff.ll
@@ -6,10 +6,10 @@
 
 declare { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.nxv1i64(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vleff_v_nxv1i64_nxv1i64(<vscale x 1 x i64>* %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vleff_v_nxv1i64_nxv1i64(ptr %0, iXLen %1, iXLen* %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv1i64_nxv1i64:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -28,7 +28,7 @@ define <vscale x 1 x i64> @intrinsic_vleff_v_nxv1i64_nxv1i64(<vscale x 1 x i64>*
 entry:
   %a = call { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.nxv1i64(
     <vscale x 1 x i64> undef,
-    <vscale x 1 x i64>* %0,
+    ptr %0,
     iXLen %1)
   %b = extractvalue { <vscale x 1 x i64>, iXLen } %a, 0
   %c = extractvalue { <vscale x 1 x i64>, iXLen } %a, 1
@@ -38,12 +38,12 @@ entry:
 
 declare { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv1i64(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -62,7 +62,7 @@ define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(<vscale x 1 x
 entry:
   %a = call { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv1i64(
     <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
+    ptr %1,
     <vscale x 1 x i1> %2,
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 1 x i64>, iXLen } %a, 0
@@ -74,10 +74,10 @@ entry:
 
 declare { <vscale x 2 x i64>, iXLen } @llvm.riscv.vleff.nxv2i64(
   <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
+  ptr,
   iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vleff_v_nxv2i64_nxv2i64(<vscale x 2 x i64>* %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vleff_v_nxv2i64_nxv2i64(ptr %0, iXLen %1, iXLen* %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv2i64_nxv2i64:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -96,7 +96,7 @@ define <vscale x 2 x i64> @intrinsic_vleff_v_nxv2i64_nxv2i64(<vscale x 2 x i64>*
 entry:
   %a = call { <vscale x 2 x i64>, iXLen } @llvm.riscv.vleff.nxv2i64(
     <vscale x 2 x i64> undef,
-    <vscale x 2 x i64>* %0,
+    ptr %0,
     iXLen %1)
   %b = extractvalue { <vscale x 2 x i64>, iXLen } %a, 0
   %c = extractvalue { <vscale x 2 x i64>, iXLen } %a, 1
@@ -106,12 +106,12 @@ entry:
 
 declare { <vscale x 2 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv2i64(
   <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
+  ptr,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vleff_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vleff_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv2i64_nxv2i64:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -130,7 +130,7 @@ define <vscale x 2 x i64> @intrinsic_vleff_mask_v_nxv2i64_nxv2i64(<vscale x 2 x
 entry:
   %a = call { <vscale x 2 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv2i64(
     <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
+    ptr %1,
     <vscale x 2 x i1> %2,
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 2 x i64>, iXLen } %a, 0
@@ -142,10 +142,10 @@ entry:
 
 declare { <vscale x 4 x i64>, iXLen } @llvm.riscv.vleff.nxv4i64(
   <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
+  ptr,
   iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vleff_v_nxv4i64_nxv4i64(<vscale x 4 x i64>* %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vleff_v_nxv4i64_nxv4i64(ptr %0, iXLen %1, iXLen* %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv4i64_nxv4i64:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -164,7 +164,7 @@ define <vscale x 4 x i64> @intrinsic_vleff_v_nxv4i64_nxv4i64(<vscale x 4 x i64>*
 entry:
   %a = call { <vscale x 4 x i64>, iXLen } @llvm.riscv.vleff.nxv4i64(
     <vscale x 4 x i64> undef,
-    <vscale x 4 x i64>* %0,
+    ptr %0,
     iXLen %1)
   %b = extractvalue { <vscale x 4 x i64>, iXLen } %a, 0
   %c = extractvalue { <vscale x 4 x i64>, iXLen } %a, 1
@@ -174,12 +174,12 @@ entry:
 
 declare { <vscale x 4 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv4i64(
   <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
+  ptr,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vleff_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vleff_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv4i64_nxv4i64:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -198,7 +198,7 @@ define <vscale x 4 x i64> @intrinsic_vleff_mask_v_nxv4i64_nxv4i64(<vscale x 4 x
 entry:
   %a = call { <vscale x 4 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv4i64(
     <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
+    ptr %1,
     <vscale x 4 x i1> %2,
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 4 x i64>, iXLen } %a, 0
@@ -210,10 +210,10 @@ entry:
 
 declare { <vscale x 8 x i64>, iXLen } @llvm.riscv.vleff.nxv8i64(
   <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
+  ptr,
   iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vleff_v_nxv8i64_nxv8i64(<vscale x 8 x i64>* %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vleff_v_nxv8i64_nxv8i64(ptr %0, iXLen %1, iXLen* %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv8i64_nxv8i64:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -232,7 +232,7 @@ define <vscale x 8 x i64> @intrinsic_vleff_v_nxv8i64_nxv8i64(<vscale x 8 x i64>*
 entry:
   %a = call { <vscale x 8 x i64>, iXLen } @llvm.riscv.vleff.nxv8i64(
     <vscale x 8 x i64> undef,
-    <vscale x 8 x i64>* %0,
+    ptr %0,
     iXLen %1)
   %b = extractvalue { <vscale x 8 x i64>, iXLen } %a, 0
   %c = extractvalue { <vscale x 8 x i64>, iXLen } %a, 1
@@ -242,12 +242,12 @@ entry:
 
 declare { <vscale x 8 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv8i64(
   <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
+  ptr,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vleff_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vleff_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv8i64_nxv8i64:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -266,7 +266,7 @@ define <vscale x 8 x i64> @intrinsic_vleff_mask_v_nxv8i64_nxv8i64(<vscale x 8 x
 entry:
   %a = call { <vscale x 8 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv8i64(
     <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
+    ptr %1,
     <vscale x 8 x i1> %2,
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 8 x i64>, iXLen } %a, 0
@@ -278,10 +278,10 @@ entry:
 
 declare { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.nxv1f64(
   <vscale x 1 x double>,
-  <vscale x 1 x double>*,
+  ptr,
   iXLen);
 
-define <vscale x 1 x double> @intrinsic_vleff_v_nxv1f64_nxv1f64(<vscale x 1 x double>* %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vleff_v_nxv1f64_nxv1f64(ptr %0, iXLen %1, iXLen* %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv1f64_nxv1f64:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -300,7 +300,7 @@ define <vscale x 1 x double> @intrinsic_vleff_v_nxv1f64_nxv1f64(<vscale x 1 x do
 entry:
   %a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.nxv1f64(
     <vscale x 1 x double> undef,
-    <vscale x 1 x double>* %0,
+    ptr %0,
     iXLen %1)
   %b = extractvalue { <vscale x 1 x double>, iXLen } %a, 0
   %c = extractvalue { <vscale x 1 x double>, iXLen } %a, 1
@@ -310,12 +310,12 @@ entry:
 
 declare { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.mask.nxv1f64(
   <vscale x 1 x double>,
-  <vscale x 1 x double>*,
+  ptr,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x double> @intrinsic_vleff_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vleff_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1f64_nxv1f64:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -334,7 +334,7 @@ define <vscale x 1 x double> @intrinsic_vleff_mask_v_nxv1f64_nxv1f64(<vscale x 1
 entry:
   %a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.mask.nxv1f64(
     <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
+    ptr %1,
     <vscale x 1 x i1> %2,
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 1 x double>, iXLen } %a, 0
@@ -346,10 +346,10 @@ entry:
 
 declare { <vscale x 2 x double>, iXLen } @llvm.riscv.vleff.nxv2f64(
   <vscale x 2 x double>,
-  <vscale x 2 x double>*,
+  ptr,
   iXLen);
 
-define <vscale x 2 x double> @intrinsic_vleff_v_nxv2f64_nxv2f64(<vscale x 2 x double>* %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vleff_v_nxv2f64_nxv2f64(ptr %0, iXLen %1, iXLen* %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv2f64_nxv2f64:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -368,7 +368,7 @@ define <vscale x 2 x double> @intrinsic_vleff_v_nxv2f64_nxv2f64(<vscale x 2 x do
 entry:
   %a = call { <vscale x 2 x double>, iXLen } @llvm.riscv.vleff.nxv2f64(
     <vscale x 2 x double> undef,
-    <vscale x 2 x double>* %0,
+    ptr %0,
     iXLen %1)
   %b = extractvalue { <vscale x 2 x double>, iXLen } %a, 0
   %c = extractvalue { <vscale x 2 x double>, iXLen } %a, 1
@@ -378,12 +378,12 @@ entry:
 
 declare { <vscale x 2 x double>, iXLen } @llvm.riscv.vleff.mask.nxv2f64(
   <vscale x 2 x double>,
-  <vscale x 2 x double>*,
+  ptr,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x double> @intrinsic_vleff_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vleff_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv2f64_nxv2f64:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -402,7 +402,7 @@ define <vscale x 2 x double> @intrinsic_vleff_mask_v_nxv2f64_nxv2f64(<vscale x 2
 entry:
   %a = call { <vscale x 2 x double>, iXLen } @llvm.riscv.vleff.mask.nxv2f64(
     <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
+    ptr %1,
     <vscale x 2 x i1> %2,
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 2 x double>, iXLen } %a, 0
@@ -414,10 +414,10 @@ entry:
 
 declare { <vscale x 4 x double>, iXLen } @llvm.riscv.vleff.nxv4f64(
   <vscale x 4 x double>,
-  <vscale x 4 x double>*,
+  ptr,
   iXLen);
 
-define <vscale x 4 x double> @intrinsic_vleff_v_nxv4f64_nxv4f64(<vscale x 4 x double>* %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vleff_v_nxv4f64_nxv4f64(ptr %0, iXLen %1, iXLen* %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv4f64_nxv4f64:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -436,7 +436,7 @@ define <vscale x 4 x double> @intrinsic_vleff_v_nxv4f64_nxv4f64(<vscale x 4 x do
 entry:
   %a = call { <vscale x 4 x double>, iXLen } @llvm.riscv.vleff.nxv4f64(
     <vscale x 4 x double> undef,
-    <vscale x 4 x double>* %0,
+    ptr %0,
     iXLen %1)
   %b = extractvalue { <vscale x 4 x double>, iXLen } %a, 0
   %c = extractvalue { <vscale x 4 x double>, iXLen } %a, 1
@@ -446,12 +446,12 @@ entry:
 
 declare { <vscale x 4 x double>, iXLen } @llvm.riscv.vleff.mask.nxv4f64(
   <vscale x 4 x double>,
-  <vscale x 4 x double>*,
+  ptr,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x double> @intrinsic_vleff_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vleff_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv4f64_nxv4f64:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -470,7 +470,7 @@ define <vscale x 4 x double> @intrinsic_vleff_mask_v_nxv4f64_nxv4f64(<vscale x 4
 entry:
   %a = call { <vscale x 4 x double>, iXLen } @llvm.riscv.vleff.mask.nxv4f64(
     <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
+    ptr %1,
     <vscale x 4 x i1> %2,
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 4 x double>, iXLen } %a, 0
@@ -482,10 +482,10 @@ entry:
 
 declare { <vscale x 8 x double>, iXLen } @llvm.riscv.vleff.nxv8f64(
   <vscale x 8 x double>,
-  <vscale x 8 x double>*,
+  ptr,
   iXLen);
 
-define <vscale x 8 x double> @intrinsic_vleff_v_nxv8f64_nxv8f64(<vscale x 8 x double>* %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vleff_v_nxv8f64_nxv8f64(ptr %0, iXLen %1, iXLen* %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv8f64_nxv8f64:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -504,7 +504,7 @@ define <vscale x 8 x double> @intrinsic_vleff_v_nxv8f64_nxv8f64(<vscale x 8 x do
 entry:
   %a = call { <vscale x 8 x double>, iXLen } @llvm.riscv.vleff.nxv8f64(
     <vscale x 8 x double> undef,
-    <vscale x 8 x double>* %0,
+    ptr %0,
     iXLen %1)
   %b = extractvalue { <vscale x 8 x double>, iXLen } %a, 0
   %c = extractvalue { <vscale x 8 x double>, iXLen } %a, 1
@@ -514,12 +514,12 @@ entry:
 
 declare { <vscale x 8 x double>, iXLen } @llvm.riscv.vleff.mask.nxv8f64(
   <vscale x 8 x double>,
-  <vscale x 8 x double>*,
+  ptr,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x double> @intrinsic_vleff_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vleff_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv8f64_nxv8f64:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -538,7 +538,7 @@ define <vscale x 8 x double> @intrinsic_vleff_mask_v_nxv8f64_nxv8f64(<vscale x 8
 entry:
   %a = call { <vscale x 8 x double>, iXLen } @llvm.riscv.vleff.mask.nxv8f64(
     <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
+    ptr %1,
     <vscale x 8 x i1> %2,
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 8 x double>, iXLen } %a, 0
@@ -550,10 +550,10 @@ entry:
 
 declare { <vscale x 1 x i32>, iXLen } @llvm.riscv.vleff.nxv1i32(
   <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
+  ptr,
   iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vleff_v_nxv1i32_nxv1i32(<vscale x 1 x i32>* %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vleff_v_nxv1i32_nxv1i32(ptr %0, iXLen %1, iXLen* %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv1i32_nxv1i32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -572,7 +572,7 @@ define <vscale x 1 x i32> @intrinsic_vleff_v_nxv1i32_nxv1i32(<vscale x 1 x i32>*
 entry:
   %a = call { <vscale x 1 x i32>, iXLen } @llvm.riscv.vleff.nxv1i32(
     <vscale x 1 x i32> undef,
-    <vscale x 1 x i32>* %0,
+    ptr %0,
     iXLen %1)
   %b = extractvalue { <vscale x 1 x i32>, iXLen } %a, 0
   %c = extractvalue { <vscale x 1 x i32>, iXLen } %a, 1
@@ -582,12 +582,12 @@ entry:
 
 declare { <vscale x 1 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv1i32(
   <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
+  ptr,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vleff_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vleff_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i32_nxv1i32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -606,7 +606,7 @@ define <vscale x 1 x i32> @intrinsic_vleff_mask_v_nxv1i32_nxv1i32(<vscale x 1 x
 entry:
   %a = call { <vscale x 1 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv1i32(
     <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
+    ptr %1,
     <vscale x 1 x i1> %2,
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 1 x i32>, iXLen } %a, 0
@@ -618,10 +618,10 @@ entry:
 
 declare { <vscale x 2 x i32>, iXLen } @llvm.riscv.vleff.nxv2i32(
   <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
+  ptr,
   iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vleff_v_nxv2i32_nxv2i32(<vscale x 2 x i32>* %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vleff_v_nxv2i32_nxv2i32(ptr %0, iXLen %1, iXLen* %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv2i32_nxv2i32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -640,7 +640,7 @@ define <vscale x 2 x i32> @intrinsic_vleff_v_nxv2i32_nxv2i32(<vscale x 2 x i32>*
 entry:
   %a = call { <vscale x 2 x i32>, iXLen } @llvm.riscv.vleff.nxv2i32(
     <vscale x 2 x i32> undef,
-    <vscale x 2 x i32>* %0,
+    ptr %0,
     iXLen %1)
   %b = extractvalue { <vscale x 2 x i32>, iXLen } %a, 0
   %c = extractvalue { <vscale x 2 x i32>, iXLen } %a, 1
@@ -650,12 +650,12 @@ entry:
 
 declare { <vscale x 2 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv2i32(
   <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
+  ptr,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vleff_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vleff_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv2i32_nxv2i32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -674,7 +674,7 @@ define <vscale x 2 x i32> @intrinsic_vleff_mask_v_nxv2i32_nxv2i32(<vscale x 2 x
 entry:
   %a = call { <vscale x 2 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv2i32(
     <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
+    ptr %1,
     <vscale x 2 x i1> %2,
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 2 x i32>, iXLen } %a, 0
@@ -686,10 +686,10 @@ entry:
 
 declare { <vscale x 4 x i32>, iXLen } @llvm.riscv.vleff.nxv4i32(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vleff_v_nxv4i32_nxv4i32(<vscale x 4 x i32>* %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vleff_v_nxv4i32_nxv4i32(ptr %0, iXLen %1, iXLen* %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv4i32_nxv4i32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -708,7 +708,7 @@ define <vscale x 4 x i32> @intrinsic_vleff_v_nxv4i32_nxv4i32(<vscale x 4 x i32>*
 entry:
   %a = call { <vscale x 4 x i32>, iXLen } @llvm.riscv.vleff.nxv4i32(
     <vscale x 4 x i32> undef,
-    <vscale x 4 x i32>* %0,
+    ptr %0,
     iXLen %1)
   %b = extractvalue { <vscale x 4 x i32>, iXLen } %a, 0
   %c = extractvalue { <vscale x 4 x i32>, iXLen } %a, 1
@@ -718,12 +718,12 @@ entry:
 
 declare { <vscale x 4 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv4i32(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vleff_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vleff_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv4i32_nxv4i32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -742,7 +742,7 @@ define <vscale x 4 x i32> @intrinsic_vleff_mask_v_nxv4i32_nxv4i32(<vscale x 4 x
 entry:
   %a = call { <vscale x 4 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv4i32(
     <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
+    ptr %1,
     <vscale x 4 x i1> %2,
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 4 x i32>, iXLen } %a, 0
@@ -754,10 +754,10 @@ entry:
 
 declare { <vscale x 8 x i32>, iXLen } @llvm.riscv.vleff.nxv8i32(
   <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
+  ptr,
   iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vleff_v_nxv8i32_nxv8i32(<vscale x 8 x i32>* %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vleff_v_nxv8i32_nxv8i32(ptr %0, iXLen %1, iXLen* %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv8i32_nxv8i32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -776,7 +776,7 @@ define <vscale x 8 x i32> @intrinsic_vleff_v_nxv8i32_nxv8i32(<vscale x 8 x i32>*
 entry:
   %a = call { <vscale x 8 x i32>, iXLen } @llvm.riscv.vleff.nxv8i32(
     <vscale x 8 x i32> undef,
-    <vscale x 8 x i32>* %0,
+    ptr %0,
     iXLen %1)
   %b = extractvalue { <vscale x 8 x i32>, iXLen } %a, 0
   %c = extractvalue { <vscale x 8 x i32>, iXLen } %a, 1
@@ -786,12 +786,12 @@ entry:
 
 declare { <vscale x 8 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv8i32(
   <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
+  ptr,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vleff_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vleff_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv8i32_nxv8i32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -810,7 +810,7 @@ define <vscale x 8 x i32> @intrinsic_vleff_mask_v_nxv8i32_nxv8i32(<vscale x 8 x
 entry:
   %a = call { <vscale x 8 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv8i32(
     <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
+    ptr %1,
     <vscale x 8 x i1> %2,
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 8 x i32>, iXLen } %a, 0
@@ -822,10 +822,10 @@ entry:
 
 declare { <vscale x 16 x i32>, iXLen } @llvm.riscv.vleff.nxv16i32(
   <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
+  ptr,
   iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vleff_v_nxv16i32_nxv16i32(<vscale x 16 x i32>* %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vleff_v_nxv16i32_nxv16i32(ptr %0, iXLen %1, iXLen* %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv16i32_nxv16i32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -844,7 +844,7 @@ define <vscale x 16 x i32> @intrinsic_vleff_v_nxv16i32_nxv16i32(<vscale x 16 x i
 entry:
   %a = call { <vscale x 16 x i32>, iXLen } @llvm.riscv.vleff.nxv16i32(
     <vscale x 16 x i32> undef,
-    <vscale x 16 x i32>* %0,
+    ptr %0,
     iXLen %1)
   %b = extractvalue { <vscale x 16 x i32>, iXLen } %a, 0
   %c = extractvalue { <vscale x 16 x i32>, iXLen } %a, 1
@@ -854,12 +854,12 @@ entry:
 
 declare { <vscale x 16 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv16i32(
   <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
+  ptr,
   <vscale x 16 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vleff_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vleff_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3, iXLen* %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv16i32_nxv16i32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -878,7 +878,7 @@ define <vscale x 16 x i32> @intrinsic_vleff_mask_v_nxv16i32_nxv16i32(<vscale x 1
 entry:
   %a = call { <vscale x 16 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv16i32(
     <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
+    ptr %1,
     <vscale x 16 x i1> %2,
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 16 x i32>, iXLen } %a, 0
@@ -890,10 +890,10 @@ entry:
 
 declare { <vscale x 1 x float>, iXLen } @llvm.riscv.vleff.nxv1f32(
   <vscale x 1 x float>,
-  <vscale x 1 x float>*,
+  ptr,
   iXLen);
 
-define <vscale x 1 x float> @intrinsic_vleff_v_nxv1f32_nxv1f32(<vscale x 1 x float>* %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vleff_v_nxv1f32_nxv1f32(ptr %0, iXLen %1, iXLen* %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv1f32_nxv1f32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -912,7 +912,7 @@ define <vscale x 1 x float> @intrinsic_vleff_v_nxv1f32_nxv1f32(<vscale x 1 x flo
 entry:
   %a = call { <vscale x 1 x float>, iXLen } @llvm.riscv.vleff.nxv1f32(
     <vscale x 1 x float> undef,
-    <vscale x 1 x float>* %0,
+    ptr %0,
     iXLen %1)
   %b = extractvalue { <vscale x 1 x float>, iXLen } %a, 0
   %c = extractvalue { <vscale x 1 x float>, iXLen } %a, 1
@@ -922,12 +922,12 @@ entry:
 
 declare { <vscale x 1 x float>, iXLen } @llvm.riscv.vleff.mask.nxv1f32(
   <vscale x 1 x float>,
-  <vscale x 1 x float>*,
+  ptr,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x float> @intrinsic_vleff_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vleff_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1f32_nxv1f32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -946,7 +946,7 @@ define <vscale x 1 x float> @intrinsic_vleff_mask_v_nxv1f32_nxv1f32(<vscale x 1
 entry:
   %a = call { <vscale x 1 x float>, iXLen } @llvm.riscv.vleff.mask.nxv1f32(
     <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
+    ptr %1,
     <vscale x 1 x i1> %2,
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 1 x float>, iXLen } %a, 0
@@ -958,10 +958,10 @@ entry:
 
 declare { <vscale x 2 x float>, iXLen } @llvm.riscv.vleff.nxv2f32(
   <vscale x 2 x float>,
-  <vscale x 2 x float>*,
+  ptr,
   iXLen);
 
-define <vscale x 2 x float> @intrinsic_vleff_v_nxv2f32_nxv2f32(<vscale x 2 x float>* %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vleff_v_nxv2f32_nxv2f32(ptr %0, iXLen %1, iXLen* %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv2f32_nxv2f32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -980,7 +980,7 @@ define <vscale x 2 x float> @intrinsic_vleff_v_nxv2f32_nxv2f32(<vscale x 2 x flo
 entry:
   %a = call { <vscale x 2 x float>, iXLen } @llvm.riscv.vleff.nxv2f32(
     <vscale x 2 x float> undef,
-    <vscale x 2 x float>* %0,
+    ptr %0,
     iXLen %1)
   %b = extractvalue { <vscale x 2 x float>, iXLen } %a, 0
   %c = extractvalue { <vscale x 2 x float>, iXLen } %a, 1
@@ -990,12 +990,12 @@ entry:
 
 declare { <vscale x 2 x float>, iXLen } @llvm.riscv.vleff.mask.nxv2f32(
   <vscale x 2 x float>,
-  <vscale x 2 x float>*,
+  ptr,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x float> @intrinsic_vleff_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vleff_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv2f32_nxv2f32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -1014,7 +1014,7 @@ define <vscale x 2 x float> @intrinsic_vleff_mask_v_nxv2f32_nxv2f32(<vscale x 2
 entry:
   %a = call { <vscale x 2 x float>, iXLen } @llvm.riscv.vleff.mask.nxv2f32(
     <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
+    ptr %1,
     <vscale x 2 x i1> %2,
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 2 x float>, iXLen } %a, 0
@@ -1026,10 +1026,10 @@ entry:
 
 declare { <vscale x 4 x float>, iXLen } @llvm.riscv.vleff.nxv4f32(
   <vscale x 4 x float>,
-  <vscale x 4 x float>*,
+  ptr,
   iXLen);
 
-define <vscale x 4 x float> @intrinsic_vleff_v_nxv4f32_nxv4f32(<vscale x 4 x float>* %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vleff_v_nxv4f32_nxv4f32(ptr %0, iXLen %1, iXLen* %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv4f32_nxv4f32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1048,7 +1048,7 @@ define <vscale x 4 x float> @intrinsic_vleff_v_nxv4f32_nxv4f32(<vscale x 4 x flo
 entry:
   %a = call { <vscale x 4 x float>, iXLen } @llvm.riscv.vleff.nxv4f32(
     <vscale x 4 x float> undef,
-    <vscale x 4 x float>* %0,
+    ptr %0,
     iXLen %1)
   %b = extractvalue { <vscale x 4 x float>, iXLen } %a, 0
   %c = extractvalue { <vscale x 4 x float>, iXLen } %a, 1
@@ -1058,12 +1058,12 @@ entry:
 
 declare { <vscale x 4 x float>, iXLen } @llvm.riscv.vleff.mask.nxv4f32(
   <vscale x 4 x float>,
-  <vscale x 4 x float>*,
+  ptr,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x float> @intrinsic_vleff_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vleff_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv4f32_nxv4f32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -1082,7 +1082,7 @@ define <vscale x 4 x float> @intrinsic_vleff_mask_v_nxv4f32_nxv4f32(<vscale x 4
 entry:
   %a = call { <vscale x 4 x float>, iXLen } @llvm.riscv.vleff.mask.nxv4f32(
     <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
+    ptr %1,
     <vscale x 4 x i1> %2,
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 4 x float>, iXLen } %a, 0
@@ -1094,10 +1094,10 @@ entry:
 
 declare { <vscale x 8 x float>, iXLen } @llvm.riscv.vleff.nxv8f32(
   <vscale x 8 x float>,
-  <vscale x 8 x float>*,
+  ptr,
   iXLen);
 
-define <vscale x 8 x float> @intrinsic_vleff_v_nxv8f32_nxv8f32(<vscale x 8 x float>* %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vleff_v_nxv8f32_nxv8f32(ptr %0, iXLen %1, iXLen* %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv8f32_nxv8f32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1116,7 +1116,7 @@ define <vscale x 8 x float> @intrinsic_vleff_v_nxv8f32_nxv8f32(<vscale x 8 x flo
 entry:
   %a = call { <vscale x 8 x float>, iXLen } @llvm.riscv.vleff.nxv8f32(
     <vscale x 8 x float> undef,
-    <vscale x 8 x float>* %0,
+    ptr %0,
     iXLen %1)
   %b = extractvalue { <vscale x 8 x float>, iXLen } %a, 0
   %c = extractvalue { <vscale x 8 x float>, iXLen } %a, 1
@@ -1126,12 +1126,12 @@ entry:
 
 declare { <vscale x 8 x float>, iXLen } @llvm.riscv.vleff.mask.nxv8f32(
   <vscale x 8 x float>,
-  <vscale x 8 x float>*,
+  ptr,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x float> @intrinsic_vleff_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vleff_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv8f32_nxv8f32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -1150,7 +1150,7 @@ define <vscale x 8 x float> @intrinsic_vleff_mask_v_nxv8f32_nxv8f32(<vscale x 8
 entry:
   %a = call { <vscale x 8 x float>, iXLen } @llvm.riscv.vleff.mask.nxv8f32(
     <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
+    ptr %1,
     <vscale x 8 x i1> %2,
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 8 x float>, iXLen } %a, 0
@@ -1162,10 +1162,10 @@ entry:
 
 declare { <vscale x 16 x float>, iXLen } @llvm.riscv.vleff.nxv16f32(
   <vscale x 16 x float>,
-  <vscale x 16 x float>*,
+  ptr,
   iXLen);
 
-define <vscale x 16 x float> @intrinsic_vleff_v_nxv16f32_nxv16f32(<vscale x 16 x float>* %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vleff_v_nxv16f32_nxv16f32(ptr %0, iXLen %1, iXLen* %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv16f32_nxv16f32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -1184,7 +1184,7 @@ define <vscale x 16 x float> @intrinsic_vleff_v_nxv16f32_nxv16f32(<vscale x 16 x
 entry:
   %a = call { <vscale x 16 x float>, iXLen } @llvm.riscv.vleff.nxv16f32(
     <vscale x 16 x float> undef,
-    <vscale x 16 x float>* %0,
+    ptr %0,
     iXLen %1)
   %b = extractvalue { <vscale x 16 x float>, iXLen } %a, 0
   %c = extractvalue { <vscale x 16 x float>, iXLen } %a, 1
@@ -1194,12 +1194,12 @@ entry:
 
 declare { <vscale x 16 x float>, iXLen } @llvm.riscv.vleff.mask.nxv16f32(
   <vscale x 16 x float>,
-  <vscale x 16 x float>*,
+  ptr,
   <vscale x 16 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 16 x float> @intrinsic_vleff_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vleff_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3, iXLen* %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv16f32_nxv16f32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -1218,7 +1218,7 @@ define <vscale x 16 x float> @intrinsic_vleff_mask_v_nxv16f32_nxv16f32(<vscale x
 entry:
   %a = call { <vscale x 16 x float>, iXLen } @llvm.riscv.vleff.mask.nxv16f32(
     <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
+    ptr %1,
     <vscale x 16 x i1> %2,
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 16 x float>, iXLen } %a, 0
@@ -1230,10 +1230,10 @@ entry:
 
 declare { <vscale x 1 x i16>, iXLen } @llvm.riscv.vleff.nxv1i16(
   <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
+  ptr,
   iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vleff_v_nxv1i16_nxv1i16(<vscale x 1 x i16>* %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vleff_v_nxv1i16_nxv1i16(ptr %0, iXLen %1, iXLen* %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv1i16_nxv1i16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1252,7 +1252,7 @@ define <vscale x 1 x i16> @intrinsic_vleff_v_nxv1i16_nxv1i16(<vscale x 1 x i16>*
 entry:
   %a = call { <vscale x 1 x i16>, iXLen } @llvm.riscv.vleff.nxv1i16(
     <vscale x 1 x i16> undef,
-    <vscale x 1 x i16>* %0,
+    ptr %0,
     iXLen %1)
   %b = extractvalue { <vscale x 1 x i16>, iXLen } %a, 0
   %c = extractvalue { <vscale x 1 x i16>, iXLen } %a, 1
@@ -1262,12 +1262,12 @@ entry:
 
 declare { <vscale x 1 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv1i16(
   <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
+  ptr,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vleff_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vleff_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i16_nxv1i16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1286,7 +1286,7 @@ define <vscale x 1 x i16> @intrinsic_vleff_mask_v_nxv1i16_nxv1i16(<vscale x 1 x
 entry:
   %a = call { <vscale x 1 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv1i16(
     <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
+    ptr %1,
     <vscale x 1 x i1> %2,
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 1 x i16>, iXLen } %a, 0
@@ -1298,10 +1298,10 @@ entry:
 
 declare { <vscale x 2 x i16>, iXLen } @llvm.riscv.vleff.nxv2i16(
   <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
+  ptr,
   iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vleff_v_nxv2i16_nxv2i16(<vscale x 2 x i16>* %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vleff_v_nxv2i16_nxv2i16(ptr %0, iXLen %1, iXLen* %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv2i16_nxv2i16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1320,7 +1320,7 @@ define <vscale x 2 x i16> @intrinsic_vleff_v_nxv2i16_nxv2i16(<vscale x 2 x i16>*
 entry:
   %a = call { <vscale x 2 x i16>, iXLen } @llvm.riscv.vleff.nxv2i16(
     <vscale x 2 x i16> undef,
-    <vscale x 2 x i16>* %0,
+    ptr %0,
     iXLen %1)
   %b = extractvalue { <vscale x 2 x i16>, iXLen } %a, 0
   %c = extractvalue { <vscale x 2 x i16>, iXLen } %a, 1
@@ -1330,12 +1330,12 @@ entry:
 
 declare { <vscale x 2 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv2i16(
   <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
+  ptr,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vleff_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vleff_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv2i16_nxv2i16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1354,7 +1354,7 @@ define <vscale x 2 x i16> @intrinsic_vleff_mask_v_nxv2i16_nxv2i16(<vscale x 2 x
 entry:
   %a = call { <vscale x 2 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv2i16(
     <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
+    ptr %1,
     <vscale x 2 x i1> %2,
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 2 x i16>, iXLen } %a, 0
@@ -1366,10 +1366,10 @@ entry:
 
 declare { <vscale x 4 x i16>, iXLen } @llvm.riscv.vleff.nxv4i16(
   <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
+  ptr,
   iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vleff_v_nxv4i16_nxv4i16(<vscale x 4 x i16>* %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vleff_v_nxv4i16_nxv4i16(ptr %0, iXLen %1, iXLen* %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv4i16_nxv4i16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1388,7 +1388,7 @@ define <vscale x 4 x i16> @intrinsic_vleff_v_nxv4i16_nxv4i16(<vscale x 4 x i16>*
 entry:
   %a = call { <vscale x 4 x i16>, iXLen } @llvm.riscv.vleff.nxv4i16(
     <vscale x 4 x i16> undef,
-    <vscale x 4 x i16>* %0,
+    ptr %0,
     iXLen %1)
   %b = extractvalue { <vscale x 4 x i16>, iXLen } %a, 0
   %c = extractvalue { <vscale x 4 x i16>, iXLen } %a, 1
@@ -1398,12 +1398,12 @@ entry:
 
 declare { <vscale x 4 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv4i16(
   <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
+  ptr,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vleff_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vleff_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv4i16_nxv4i16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1422,7 +1422,7 @@ define <vscale x 4 x i16> @intrinsic_vleff_mask_v_nxv4i16_nxv4i16(<vscale x 4 x
 entry:
   %a = call { <vscale x 4 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv4i16(
     <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
+    ptr %1,
     <vscale x 4 x i1> %2,
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 4 x i16>, iXLen } %a, 0
@@ -1434,10 +1434,10 @@ entry:
 
 declare { <vscale x 8 x i16>, iXLen } @llvm.riscv.vleff.nxv8i16(
   <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
+  ptr,
   iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vleff_v_nxv8i16_nxv8i16(<vscale x 8 x i16>* %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vleff_v_nxv8i16_nxv8i16(ptr %0, iXLen %1, iXLen* %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv8i16_nxv8i16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1456,7 +1456,7 @@ define <vscale x 8 x i16> @intrinsic_vleff_v_nxv8i16_nxv8i16(<vscale x 8 x i16>*
 entry:
   %a = call { <vscale x 8 x i16>, iXLen } @llvm.riscv.vleff.nxv8i16(
     <vscale x 8 x i16> undef,
-    <vscale x 8 x i16>* %0,
+    ptr %0,
     iXLen %1)
   %b = extractvalue { <vscale x 8 x i16>, iXLen } %a, 0
   %c = extractvalue { <vscale x 8 x i16>, iXLen } %a, 1
@@ -1466,12 +1466,12 @@ entry:
 
 declare { <vscale x 8 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv8i16(
   <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
+  ptr,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vleff_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vleff_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv8i16_nxv8i16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1490,7 +1490,7 @@ define <vscale x 8 x i16> @intrinsic_vleff_mask_v_nxv8i16_nxv8i16(<vscale x 8 x
 entry:
   %a = call { <vscale x 8 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv8i16(
     <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
+    ptr %1,
     <vscale x 8 x i1> %2,
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 8 x i16>, iXLen } %a, 0
@@ -1502,10 +1502,10 @@ entry:
 
 declare { <vscale x 16 x i16>, iXLen } @llvm.riscv.vleff.nxv16i16(
   <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
+  ptr,
   iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vleff_v_nxv16i16_nxv16i16(<vscale x 16 x i16>* %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vleff_v_nxv16i16_nxv16i16(ptr %0, iXLen %1, iXLen* %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv16i16_nxv16i16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1524,7 +1524,7 @@ define <vscale x 16 x i16> @intrinsic_vleff_v_nxv16i16_nxv16i16(<vscale x 16 x i
 entry:
   %a = call { <vscale x 16 x i16>, iXLen } @llvm.riscv.vleff.nxv16i16(
     <vscale x 16 x i16> undef,
-    <vscale x 16 x i16>* %0,
+    ptr %0,
     iXLen %1)
   %b = extractvalue { <vscale x 16 x i16>, iXLen } %a, 0
   %c = extractvalue { <vscale x 16 x i16>, iXLen } %a, 1
@@ -1534,12 +1534,12 @@ entry:
 
 declare { <vscale x 16 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv16i16(
   <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
+  ptr,
   <vscale x 16 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vleff_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vleff_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3, iXLen* %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv16i16_nxv16i16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1558,7 +1558,7 @@ define <vscale x 16 x i16> @intrinsic_vleff_mask_v_nxv16i16_nxv16i16(<vscale x 1
 entry:
   %a = call { <vscale x 16 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv16i16(
     <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
+    ptr %1,
     <vscale x 16 x i1> %2,
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 16 x i16>, iXLen } %a, 0
@@ -1570,10 +1570,10 @@ entry:
 
 declare { <vscale x 32 x i16>, iXLen } @llvm.riscv.vleff.nxv32i16(
   <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
+  ptr,
   iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vleff_v_nxv32i16_nxv32i16(<vscale x 32 x i16>* %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vleff_v_nxv32i16_nxv32i16(ptr %0, iXLen %1, iXLen* %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv32i16_nxv32i16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -1592,7 +1592,7 @@ define <vscale x 32 x i16> @intrinsic_vleff_v_nxv32i16_nxv32i16(<vscale x 32 x i
 entry:
   %a = call { <vscale x 32 x i16>, iXLen } @llvm.riscv.vleff.nxv32i16(
     <vscale x 32 x i16> undef,
-    <vscale x 32 x i16>* %0,
+    ptr %0,
     iXLen %1)
   %b = extractvalue { <vscale x 32 x i16>, iXLen } %a, 0
   %c = extractvalue { <vscale x 32 x i16>, iXLen } %a, 1
@@ -1602,12 +1602,12 @@ entry:
 
 declare { <vscale x 32 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv32i16(
   <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
+  ptr,
   <vscale x 32 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vleff_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vleff_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i1> %2, iXLen %3, iXLen* %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv32i16_nxv32i16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -1626,7 +1626,7 @@ define <vscale x 32 x i16> @intrinsic_vleff_mask_v_nxv32i16_nxv32i16(<vscale x 3
 entry:
   %a = call { <vscale x 32 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv32i16(
     <vscale x 32 x i16> %0,
-    <vscale x 32 x i16>* %1,
+    ptr %1,
     <vscale x 32 x i1> %2,
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 32 x i16>, iXLen } %a, 0
@@ -1638,10 +1638,10 @@ entry:
 
 declare { <vscale x 1 x half>, iXLen } @llvm.riscv.vleff.nxv1f16(
   <vscale x 1 x half>,
-  <vscale x 1 x half>*,
+  ptr,
   iXLen);
 
-define <vscale x 1 x half> @intrinsic_vleff_v_nxv1half_nxv1f16(<vscale x 1 x half>* %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vleff_v_nxv1half_nxv1f16(ptr %0, iXLen %1, iXLen* %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv1half_nxv1f16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1660,7 +1660,7 @@ define <vscale x 1 x half> @intrinsic_vleff_v_nxv1half_nxv1f16(<vscale x 1 x hal
 entry:
   %a = call { <vscale x 1 x half>, iXLen } @llvm.riscv.vleff.nxv1f16(
     <vscale x 1 x half> undef,
-    <vscale x 1 x half>* %0,
+    ptr %0,
     iXLen %1)
   %b = extractvalue { <vscale x 1 x half>, iXLen } %a, 0
   %c = extractvalue { <vscale x 1 x half>, iXLen } %a, 1
@@ -1670,12 +1670,12 @@ entry:
 
 declare { <vscale x 1 x half>, iXLen } @llvm.riscv.vleff.mask.nxv1f16(
   <vscale x 1 x half>,
-  <vscale x 1 x half>*,
+  ptr,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x half> @intrinsic_vleff_mask_v_nxv1half_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vleff_mask_v_nxv1half_nxv1f16(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1half_nxv1f16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1694,7 +1694,7 @@ define <vscale x 1 x half> @intrinsic_vleff_mask_v_nxv1half_nxv1f16(<vscale x 1
 entry:
   %a = call { <vscale x 1 x half>, iXLen } @llvm.riscv.vleff.mask.nxv1f16(
     <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
+    ptr %1,
     <vscale x 1 x i1> %2,
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 1 x half>, iXLen } %a, 0
@@ -1706,10 +1706,10 @@ entry:
 
 declare { <vscale x 2 x half>, iXLen } @llvm.riscv.vleff.nxv2f16(
   <vscale x 2 x half>,
-  <vscale x 2 x half>*,
+  ptr,
   iXLen);
 
-define <vscale x 2 x half> @intrinsic_vleff_v_nxv2half_nxv2f16(<vscale x 2 x half>* %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vleff_v_nxv2half_nxv2f16(ptr %0, iXLen %1, iXLen* %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv2half_nxv2f16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1728,7 +1728,7 @@ define <vscale x 2 x half> @intrinsic_vleff_v_nxv2half_nxv2f16(<vscale x 2 x hal
 entry:
   %a = call { <vscale x 2 x half>, iXLen } @llvm.riscv.vleff.nxv2f16(
     <vscale x 2 x half> undef,
-    <vscale x 2 x half>* %0,
+    ptr %0,
     iXLen %1)
   %b = extractvalue { <vscale x 2 x half>, iXLen } %a, 0
   %c = extractvalue { <vscale x 2 x half>, iXLen } %a, 1
@@ -1738,12 +1738,12 @@ entry:
 
 declare { <vscale x 2 x half>, iXLen } @llvm.riscv.vleff.mask.nxv2f16(
   <vscale x 2 x half>,
-  <vscale x 2 x half>*,
+  ptr,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x half> @intrinsic_vleff_mask_v_nxv2half_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vleff_mask_v_nxv2half_nxv2f16(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv2half_nxv2f16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1762,7 +1762,7 @@ define <vscale x 2 x half> @intrinsic_vleff_mask_v_nxv2half_nxv2f16(<vscale x 2
 entry:
   %a = call { <vscale x 2 x half>, iXLen } @llvm.riscv.vleff.mask.nxv2f16(
     <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
+    ptr %1,
     <vscale x 2 x i1> %2,
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 2 x half>, iXLen } %a, 0
@@ -1774,10 +1774,10 @@ entry:
 
 declare { <vscale x 4 x half>, iXLen } @llvm.riscv.vleff.nxv4f16(
   <vscale x 4 x half>,
-  <vscale x 4 x half>*,
+  ptr,
   iXLen);
 
-define <vscale x 4 x half> @intrinsic_vleff_v_nxv4half_nxv4f16(<vscale x 4 x half>* %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vleff_v_nxv4half_nxv4f16(ptr %0, iXLen %1, iXLen* %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv4half_nxv4f16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1796,7 +1796,7 @@ define <vscale x 4 x half> @intrinsic_vleff_v_nxv4half_nxv4f16(<vscale x 4 x hal
 entry:
   %a = call { <vscale x 4 x half>, iXLen } @llvm.riscv.vleff.nxv4f16(
     <vscale x 4 x half> undef,
-    <vscale x 4 x half>* %0,
+    ptr %0,
     iXLen %1)
   %b = extractvalue { <vscale x 4 x half>, iXLen } %a, 0
   %c = extractvalue { <vscale x 4 x half>, iXLen } %a, 1
@@ -1806,12 +1806,12 @@ entry:
 
 declare { <vscale x 4 x half>, iXLen } @llvm.riscv.vleff.mask.nxv4f16(
   <vscale x 4 x half>,
-  <vscale x 4 x half>*,
+  ptr,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x half> @intrinsic_vleff_mask_v_nxv4half_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vleff_mask_v_nxv4half_nxv4f16(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv4half_nxv4f16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1830,7 +1830,7 @@ define <vscale x 4 x half> @intrinsic_vleff_mask_v_nxv4half_nxv4f16(<vscale x 4
 entry:
   %a = call { <vscale x 4 x half>, iXLen } @llvm.riscv.vleff.mask.nxv4f16(
     <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
+    ptr %1,
     <vscale x 4 x i1> %2,
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 4 x half>, iXLen } %a, 0
@@ -1842,10 +1842,10 @@ entry:
 
 declare { <vscale x 8 x half>, iXLen } @llvm.riscv.vleff.nxv8f16(
   <vscale x 8 x half>,
-  <vscale x 8 x half>*,
+  ptr,
   iXLen);
 
-define <vscale x 8 x half> @intrinsic_vleff_v_nxv8half_nxv8f16(<vscale x 8 x half>* %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vleff_v_nxv8half_nxv8f16(ptr %0, iXLen %1, iXLen* %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv8half_nxv8f16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1864,7 +1864,7 @@ define <vscale x 8 x half> @intrinsic_vleff_v_nxv8half_nxv8f16(<vscale x 8 x hal
 entry:
   %a = call { <vscale x 8 x half>, iXLen } @llvm.riscv.vleff.nxv8f16(
     <vscale x 8 x half> undef,
-    <vscale x 8 x half>* %0,
+    ptr %0,
     iXLen %1)
   %b = extractvalue { <vscale x 8 x half>, iXLen } %a, 0
   %c = extractvalue { <vscale x 8 x half>, iXLen } %a, 1
@@ -1874,12 +1874,12 @@ entry:
 
 declare { <vscale x 8 x half>, iXLen } @llvm.riscv.vleff.mask.nxv8f16(
   <vscale x 8 x half>,
-  <vscale x 8 x half>*,
+  ptr,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x half> @intrinsic_vleff_mask_v_nxv8half_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vleff_mask_v_nxv8half_nxv8f16(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv8half_nxv8f16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1898,7 +1898,7 @@ define <vscale x 8 x half> @intrinsic_vleff_mask_v_nxv8half_nxv8f16(<vscale x 8
 entry:
   %a = call { <vscale x 8 x half>, iXLen } @llvm.riscv.vleff.mask.nxv8f16(
     <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
+    ptr %1,
     <vscale x 8 x i1> %2,
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 8 x half>, iXLen } %a, 0
@@ -1910,10 +1910,10 @@ entry:
 
 declare { <vscale x 16 x half>, iXLen } @llvm.riscv.vleff.nxv16f16(
   <vscale x 16 x half>,
-  <vscale x 16 x half>*,
+  ptr,
   iXLen);
 
-define <vscale x 16 x half> @intrinsic_vleff_v_nxv16half_nxv16f16(<vscale x 16 x half>* %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vleff_v_nxv16half_nxv16f16(ptr %0, iXLen %1, iXLen* %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv16half_nxv16f16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1932,7 +1932,7 @@ define <vscale x 16 x half> @intrinsic_vleff_v_nxv16half_nxv16f16(<vscale x 16 x
 entry:
   %a = call { <vscale x 16 x half>, iXLen } @llvm.riscv.vleff.nxv16f16(
     <vscale x 16 x half> undef,
-    <vscale x 16 x half>* %0,
+    ptr %0,
     iXLen %1)
   %b = extractvalue { <vscale x 16 x half>, iXLen } %a, 0
   %c = extractvalue { <vscale x 16 x half>, iXLen } %a, 1
@@ -1942,12 +1942,12 @@ entry:
 
 declare { <vscale x 16 x half>, iXLen } @llvm.riscv.vleff.mask.nxv16f16(
   <vscale x 16 x half>,
-  <vscale x 16 x half>*,
+  ptr,
   <vscale x 16 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 16 x half> @intrinsic_vleff_mask_v_nxv16half_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vleff_mask_v_nxv16half_nxv16f16(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3, iXLen* %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv16half_nxv16f16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1966,7 +1966,7 @@ define <vscale x 16 x half> @intrinsic_vleff_mask_v_nxv16half_nxv16f16(<vscale x
 entry:
   %a = call { <vscale x 16 x half>, iXLen } @llvm.riscv.vleff.mask.nxv16f16(
     <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
+    ptr %1,
     <vscale x 16 x i1> %2,
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 16 x half>, iXLen } %a, 0
@@ -1978,10 +1978,10 @@ entry:
 
 declare { <vscale x 32 x half>, iXLen } @llvm.riscv.vleff.nxv32f16(
   <vscale x 32 x half>,
-  <vscale x 32 x half>*,
+  ptr,
   iXLen);
 
-define <vscale x 32 x half> @intrinsic_vleff_v_nxv32half_nxv32f16(<vscale x 32 x half>* %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vleff_v_nxv32half_nxv32f16(ptr %0, iXLen %1, iXLen* %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv32half_nxv32f16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -2000,7 +2000,7 @@ define <vscale x 32 x half> @intrinsic_vleff_v_nxv32half_nxv32f16(<vscale x 32 x
 entry:
   %a = call { <vscale x 32 x half>, iXLen } @llvm.riscv.vleff.nxv32f16(
     <vscale x 32 x half> undef,
-    <vscale x 32 x half>* %0,
+    ptr %0,
     iXLen %1)
   %b = extractvalue { <vscale x 32 x half>, iXLen } %a, 0
   %c = extractvalue { <vscale x 32 x half>, iXLen } %a, 1
@@ -2010,12 +2010,12 @@ entry:
 
 declare { <vscale x 32 x half>, iXLen } @llvm.riscv.vleff.mask.nxv32f16(
   <vscale x 32 x half>,
-  <vscale x 32 x half>*,
+  ptr,
   <vscale x 32 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 32 x half> @intrinsic_vleff_mask_v_nxv32half_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 32 x half> @intrinsic_vleff_mask_v_nxv32half_nxv32f16(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i1> %2, iXLen %3, iXLen* %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv32half_nxv32f16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -2034,7 +2034,7 @@ define <vscale x 32 x half> @intrinsic_vleff_mask_v_nxv32half_nxv32f16(<vscale x
 entry:
   %a = call { <vscale x 32 x half>, iXLen } @llvm.riscv.vleff.mask.nxv32f16(
     <vscale x 32 x half> %0,
-    <vscale x 32 x half>* %1,
+    ptr %1,
     <vscale x 32 x i1> %2,
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 32 x half>, iXLen } %a, 0
@@ -2046,10 +2046,10 @@ entry:
 
 declare { <vscale x 1 x i8>, iXLen } @llvm.riscv.vleff.nxv1i8(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vleff_v_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vleff_v_nxv1i8_nxv1i8(ptr %0, iXLen %1, iXLen* %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv1i8_nxv1i8:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -2068,7 +2068,7 @@ define <vscale x 1 x i8> @intrinsic_vleff_v_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0,
 entry:
   %a = call { <vscale x 1 x i8>, iXLen } @llvm.riscv.vleff.nxv1i8(
     <vscale x 1 x i8> undef,
-    <vscale x 1 x i8>* %0,
+    ptr %0,
     iXLen %1)
   %b = extractvalue { <vscale x 1 x i8>, iXLen } %a, 0
   %c = extractvalue { <vscale x 1 x i8>, iXLen } %a, 1
@@ -2078,12 +2078,12 @@ entry:
 
 declare { <vscale x 1 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv1i8(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vleff_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vleff_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i8_nxv1i8:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -2102,7 +2102,7 @@ define <vscale x 1 x i8> @intrinsic_vleff_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8>
 entry:
   %a = call { <vscale x 1 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv1i8(
     <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
+    ptr %1,
     <vscale x 1 x i1> %2,
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 1 x i8>, iXLen } %a, 0
@@ -2114,10 +2114,10 @@ entry:
 
 declare { <vscale x 2 x i8>, iXLen } @llvm.riscv.vleff.nxv2i8(
   <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
+  ptr,
   iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vleff_v_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vleff_v_nxv2i8_nxv2i8(ptr %0, iXLen %1, iXLen* %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv2i8_nxv2i8:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -2136,7 +2136,7 @@ define <vscale x 2 x i8> @intrinsic_vleff_v_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0,
 entry:
   %a = call { <vscale x 2 x i8>, iXLen } @llvm.riscv.vleff.nxv2i8(
     <vscale x 2 x i8> undef,
-    <vscale x 2 x i8>* %0,
+    ptr %0,
     iXLen %1)
   %b = extractvalue { <vscale x 2 x i8>, iXLen } %a, 0
   %c = extractvalue { <vscale x 2 x i8>, iXLen } %a, 1
@@ -2146,12 +2146,12 @@ entry:
 
 declare { <vscale x 2 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv2i8(
   <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
+  ptr,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vleff_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vleff_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv2i8_nxv2i8:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -2170,7 +2170,7 @@ define <vscale x 2 x i8> @intrinsic_vleff_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8>
 entry:
   %a = call { <vscale x 2 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv2i8(
     <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
+    ptr %1,
     <vscale x 2 x i1> %2,
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 2 x i8>, iXLen } %a, 0
@@ -2182,10 +2182,10 @@ entry:
 
 declare { <vscale x 4 x i8>, iXLen } @llvm.riscv.vleff.nxv4i8(
   <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
+  ptr,
   iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vleff_v_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vleff_v_nxv4i8_nxv4i8(ptr %0, iXLen %1, iXLen* %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv4i8_nxv4i8:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -2204,7 +2204,7 @@ define <vscale x 4 x i8> @intrinsic_vleff_v_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0,
 entry:
   %a = call { <vscale x 4 x i8>, iXLen } @llvm.riscv.vleff.nxv4i8(
     <vscale x 4 x i8> undef,
-    <vscale x 4 x i8>* %0,
+    ptr %0,
     iXLen %1)
   %b = extractvalue { <vscale x 4 x i8>, iXLen } %a, 0
   %c = extractvalue { <vscale x 4 x i8>, iXLen } %a, 1
@@ -2214,12 +2214,12 @@ entry:
 
 declare { <vscale x 4 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv4i8(
   <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
+  ptr,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vleff_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vleff_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv4i8_nxv4i8:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -2238,7 +2238,7 @@ define <vscale x 4 x i8> @intrinsic_vleff_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8>
 entry:
   %a = call { <vscale x 4 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv4i8(
     <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
+    ptr %1,
     <vscale x 4 x i1> %2,
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 4 x i8>, iXLen } %a, 0
@@ -2250,10 +2250,10 @@ entry:
 
 declare { <vscale x 8 x i8>, iXLen } @llvm.riscv.vleff.nxv8i8(
   <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
+  ptr,
   iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vleff_v_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vleff_v_nxv8i8_nxv8i8(ptr %0, iXLen %1, iXLen* %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv8i8_nxv8i8:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -2272,7 +2272,7 @@ define <vscale x 8 x i8> @intrinsic_vleff_v_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0,
 entry:
   %a = call { <vscale x 8 x i8>, iXLen } @llvm.riscv.vleff.nxv8i8(
     <vscale x 8 x i8> undef,
-    <vscale x 8 x i8>* %0,
+    ptr %0,
     iXLen %1)
   %b = extractvalue { <vscale x 8 x i8>, iXLen } %a, 0
   %c = extractvalue { <vscale x 8 x i8>, iXLen } %a, 1
@@ -2282,12 +2282,12 @@ entry:
 
 declare { <vscale x 8 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv8i8(
   <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
+  ptr,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vleff_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vleff_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv8i8_nxv8i8:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -2306,7 +2306,7 @@ define <vscale x 8 x i8> @intrinsic_vleff_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8>
 entry:
   %a = call { <vscale x 8 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv8i8(
     <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
+    ptr %1,
     <vscale x 8 x i1> %2,
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 8 x i8>, iXLen } %a, 0
@@ -2318,10 +2318,10 @@ entry:
 
 declare { <vscale x 16 x i8>, iXLen } @llvm.riscv.vleff.nxv16i8(
   <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
+  ptr,
   iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vleff_v_nxv16i8_nxv16i8(<vscale x 16 x i8>* %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vleff_v_nxv16i8_nxv16i8(ptr %0, iXLen %1, iXLen* %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv16i8_nxv16i8:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -2340,7 +2340,7 @@ define <vscale x 16 x i8> @intrinsic_vleff_v_nxv16i8_nxv16i8(<vscale x 16 x i8>*
 entry:
   %a = call { <vscale x 16 x i8>, iXLen } @llvm.riscv.vleff.nxv16i8(
     <vscale x 16 x i8> undef,
-    <vscale x 16 x i8>* %0,
+    ptr %0,
     iXLen %1)
   %b = extractvalue { <vscale x 16 x i8>, iXLen } %a, 0
   %c = extractvalue { <vscale x 16 x i8>, iXLen } %a, 1
@@ -2350,12 +2350,12 @@ entry:
 
 declare { <vscale x 16 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv16i8(
   <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
+  ptr,
   <vscale x 16 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vleff_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vleff_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3, iXLen* %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv16i8_nxv16i8:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -2374,7 +2374,7 @@ define <vscale x 16 x i8> @intrinsic_vleff_mask_v_nxv16i8_nxv16i8(<vscale x 16 x
 entry:
   %a = call { <vscale x 16 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv16i8(
     <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
+    ptr %1,
     <vscale x 16 x i1> %2,
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 16 x i8>, iXLen } %a, 0
@@ -2386,10 +2386,10 @@ entry:
 
 declare { <vscale x 32 x i8>, iXLen } @llvm.riscv.vleff.nxv32i8(
   <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
+  ptr,
   iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vleff_v_nxv32i8_nxv32i8(<vscale x 32 x i8>* %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vleff_v_nxv32i8_nxv32i8(ptr %0, iXLen %1, iXLen* %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv32i8_nxv32i8:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -2408,7 +2408,7 @@ define <vscale x 32 x i8> @intrinsic_vleff_v_nxv32i8_nxv32i8(<vscale x 32 x i8>*
 entry:
   %a = call { <vscale x 32 x i8>, iXLen } @llvm.riscv.vleff.nxv32i8(
     <vscale x 32 x i8> undef,
-    <vscale x 32 x i8>* %0,
+    ptr %0,
     iXLen %1)
   %b = extractvalue { <vscale x 32 x i8>, iXLen } %a, 0
   %c = extractvalue { <vscale x 32 x i8>, iXLen } %a, 1
@@ -2418,12 +2418,12 @@ entry:
 
 declare { <vscale x 32 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv32i8(
   <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
+  ptr,
   <vscale x 32 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vleff_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vleff_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i1> %2, iXLen %3, iXLen* %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv32i8_nxv32i8:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -2442,7 +2442,7 @@ define <vscale x 32 x i8> @intrinsic_vleff_mask_v_nxv32i8_nxv32i8(<vscale x 32 x
 entry:
   %a = call { <vscale x 32 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv32i8(
     <vscale x 32 x i8> %0,
-    <vscale x 32 x i8>* %1,
+    ptr %1,
     <vscale x 32 x i1> %2,
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 32 x i8>, iXLen } %a, 0
@@ -2454,10 +2454,10 @@ entry:
 
 declare { <vscale x 64 x i8>, iXLen } @llvm.riscv.vleff.nxv64i8(
   <vscale x 64 x i8>,
-  <vscale x 64 x i8>*,
+  ptr,
   iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vleff_v_nxv64i8_nxv64i8(<vscale x 64 x i8>* %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vleff_v_nxv64i8_nxv64i8(ptr %0, iXLen %1, iXLen* %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv64i8_nxv64i8:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
@@ -2476,7 +2476,7 @@ define <vscale x 64 x i8> @intrinsic_vleff_v_nxv64i8_nxv64i8(<vscale x 64 x i8>*
 entry:
   %a = call { <vscale x 64 x i8>, iXLen } @llvm.riscv.vleff.nxv64i8(
     <vscale x 64 x i8> undef,
-    <vscale x 64 x i8>* %0,
+    ptr %0,
     iXLen %1)
   %b = extractvalue { <vscale x 64 x i8>, iXLen } %a, 0
   %c = extractvalue { <vscale x 64 x i8>, iXLen } %a, 1
@@ -2486,12 +2486,12 @@ entry:
 
 declare { <vscale x 64 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv64i8(
   <vscale x 64 x i8>,
-  <vscale x 64 x i8>*,
+  ptr,
   <vscale x 64 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vleff_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vleff_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, <vscale x 64 x i1> %2, iXLen %3, iXLen* %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv64i8_nxv64i8:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
@@ -2510,7 +2510,7 @@ define <vscale x 64 x i8> @intrinsic_vleff_mask_v_nxv64i8_nxv64i8(<vscale x 64 x
 entry:
   %a = call { <vscale x 64 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv64i8(
     <vscale x 64 x i8> %0,
-    <vscale x 64 x i8>* %1,
+    ptr %1,
     <vscale x 64 x i1> %2,
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 64 x i8>, iXLen } %a, 0
@@ -2521,7 +2521,7 @@ entry:
 }
 
 ; Test with the VL output unused
-define <vscale x 1 x double> @intrinsic_vleff_dead_vl(<vscale x 1 x double>* %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vleff_dead_vl(ptr %0, iXLen %1, iXLen* %2) nounwind {
 ; CHECK-LABEL: intrinsic_vleff_dead_vl:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -2530,13 +2530,13 @@ define <vscale x 1 x double> @intrinsic_vleff_dead_vl(<vscale x 1 x double>* %0,
 entry:
   %a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.nxv1f64(
     <vscale x 1 x double> undef,
-    <vscale x 1 x double>* %0,
+    ptr %0,
     iXLen %1)
   %b = extractvalue { <vscale x 1 x double>, iXLen } %a, 0
   ret <vscale x 1 x double> %b
 }
 
-define <vscale x 1 x double> @intrinsic_vleff_mask_dead_vl(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vleff_mask_dead_vl(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vleff_mask_dead_vl:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -2545,7 +2545,7 @@ define <vscale x 1 x double> @intrinsic_vleff_mask_dead_vl(<vscale x 1 x double>
 entry:
   %a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.mask.nxv1f64(
     <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
+    ptr %1,
     <vscale x 1 x i1> %2,
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 1 x double>, iXLen } %a, 0
@@ -2554,7 +2554,7 @@ entry:
 }
 
 ; Test with the loaded value unused
-define void @intrinsic_vleff_dead_value(<vscale x 1 x double>* %0, iXLen %1, iXLen* %2) nounwind {
+define void @intrinsic_vleff_dead_value(ptr %0, iXLen %1, iXLen* %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_dead_value:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -2573,14 +2573,14 @@ define void @intrinsic_vleff_dead_value(<vscale x 1 x double>* %0, iXLen %1, iXL
 entry:
   %a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.nxv1f64(
     <vscale x 1 x double> undef,
-    <vscale x 1 x double>* %0,
+    ptr %0,
     iXLen %1)
   %b = extractvalue { <vscale x 1 x double>, iXLen } %a, 1
   store iXLen %b, iXLen* %2
   ret void
 }
 
-define void @intrinsic_vleff_mask_dead_value(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define void @intrinsic_vleff_mask_dead_value(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_dead_value:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -2599,7 +2599,7 @@ define void @intrinsic_vleff_mask_dead_value(<vscale x 1 x double> %0, <vscale x
 entry:
   %a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.mask.nxv1f64(
     <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
+    ptr %1,
     <vscale x 1 x i1> %2,
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 1 x double>, iXLen } %a, 1
@@ -2609,7 +2609,7 @@ entry:
 }
 
 ; Test with both outputs dead. Make sure the vleff isn't deleted.
-define void @intrinsic_vleff_dead_all(<vscale x 1 x double>* %0, iXLen %1, iXLen* %2) nounwind {
+define void @intrinsic_vleff_dead_all(ptr %0, iXLen %1, iXLen* %2) nounwind {
 ; CHECK-LABEL: intrinsic_vleff_dead_all:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -2618,12 +2618,12 @@ define void @intrinsic_vleff_dead_all(<vscale x 1 x double>* %0, iXLen %1, iXLen
 entry:
   %a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.nxv1f64(
     <vscale x 1 x double> undef,
-    <vscale x 1 x double>* %0,
+    ptr %0,
     iXLen %1)
   ret void
 }
 
-define void @intrinsic_vleff_mask_dead_all(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+define void @intrinsic_vleff_mask_dead_all(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vleff_mask_dead_all:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -2632,7 +2632,7 @@ define void @intrinsic_vleff_mask_dead_all(<vscale x 1 x double> %0, <vscale x 1
 entry:
   %a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.mask.nxv1f64(
     <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
+    ptr %1,
     <vscale x 1 x i1> %2,
     iXLen %3, iXLen 1)
 

diff --git a/llvm/test/CodeGen/RISCV/rvv/vlm.ll b/llvm/test/CodeGen/RISCV/rvv/vlm.ll
index 2e03c14abf193..7f4b777b06eb0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vlm.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vlm.ll
@@ -4,93 +4,93 @@
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
 ; RUN:   -verify-machineinstrs | FileCheck %s
 
-declare <vscale x 1 x i1> @llvm.riscv.vlm.nxv1i1(<vscale x 1 x i1>*, iXLen);
+declare <vscale x 1 x i1> @llvm.riscv.vlm.nxv1i1(ptr, iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vlm_v_nxv1i1(<vscale x 1 x i1>* %0, iXLen %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vlm_v_nxv1i1(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vlm_v_nxv1i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vlm.v v0, (a0)
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vlm.nxv1i1(<vscale x 1 x i1>* %0, iXLen %1)
+  %a = call <vscale x 1 x i1> @llvm.riscv.vlm.nxv1i1(ptr %0, iXLen %1)
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vlm.nxv2i1(<vscale x 2 x i1>*, iXLen);
+declare <vscale x 2 x i1> @llvm.riscv.vlm.nxv2i1(ptr, iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vlm_v_nxv2i1(<vscale x 2 x i1>* %0, iXLen %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vlm_v_nxv2i1(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vlm_v_nxv2i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vlm.v v0, (a0)
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vlm.nxv2i1(<vscale x 2 x i1>* %0, iXLen %1)
+  %a = call <vscale x 2 x i1> @llvm.riscv.vlm.nxv2i1(ptr %0, iXLen %1)
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vlm.nxv4i1(<vscale x 4 x i1>*, iXLen);
+declare <vscale x 4 x i1> @llvm.riscv.vlm.nxv4i1(ptr, iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vlm_v_nxv4i1(<vscale x 4 x i1>* %0, iXLen %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vlm_v_nxv4i1(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vlm_v_nxv4i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vlm.v v0, (a0)
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vlm.nxv4i1(<vscale x 4 x i1>* %0, iXLen %1)
+  %a = call <vscale x 4 x i1> @llvm.riscv.vlm.nxv4i1(ptr %0, iXLen %1)
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vlm.nxv8i1(<vscale x 8 x i1>*, iXLen);
+declare <vscale x 8 x i1> @llvm.riscv.vlm.nxv8i1(ptr, iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vlm_v_nxv8i1(<vscale x 8 x i1>* %0, iXLen %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vlm_v_nxv8i1(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vlm_v_nxv8i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vlm.v v0, (a0)
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vlm.nxv8i1(<vscale x 8 x i1>* %0, iXLen %1)
+  %a = call <vscale x 8 x i1> @llvm.riscv.vlm.nxv8i1(ptr %0, iXLen %1)
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vlm.nxv16i1(<vscale x 16 x i1>*, iXLen);
+declare <vscale x 16 x i1> @llvm.riscv.vlm.nxv16i1(ptr, iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vlm_v_nxv16i1(<vscale x 16 x i1>* %0, iXLen %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vlm_v_nxv16i1(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vlm_v_nxv16i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vlm.v v0, (a0)
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vlm.nxv16i1(<vscale x 16 x i1>* %0, iXLen %1)
+  %a = call <vscale x 16 x i1> @llvm.riscv.vlm.nxv16i1(ptr %0, iXLen %1)
   ret <vscale x 16 x i1> %a
 }
 
-declare <vscale x 32 x i1> @llvm.riscv.vlm.nxv32i1(<vscale x 32 x i1>*, iXLen);
+declare <vscale x 32 x i1> @llvm.riscv.vlm.nxv32i1(ptr, iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vlm_v_nxv32i1(<vscale x 32 x i1>* %0, iXLen %1) nounwind {
+define <vscale x 32 x i1> @intrinsic_vlm_v_nxv32i1(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vlm_v_nxv32i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    vlm.v v0, (a0)
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vlm.nxv32i1(<vscale x 32 x i1>* %0, iXLen %1)
+  %a = call <vscale x 32 x i1> @llvm.riscv.vlm.nxv32i1(ptr %0, iXLen %1)
   ret <vscale x 32 x i1> %a
 }
 
-declare <vscale x 64 x i1> @llvm.riscv.vlm.nxv64i1(<vscale x 64 x i1>*, iXLen);
+declare <vscale x 64 x i1> @llvm.riscv.vlm.nxv64i1(ptr, iXLen);
 
-define <vscale x 64 x i1> @intrinsic_vlm_v_nxv64i1(<vscale x 64 x i1>* %0, iXLen %1) nounwind {
+define <vscale x 64 x i1> @intrinsic_vlm_v_nxv64i1(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vlm_v_nxv64i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT:    vlm.v v0, (a0)
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 64 x i1> @llvm.riscv.vlm.nxv64i1(<vscale x 64 x i1>* %0, iXLen %1)
+  %a = call <vscale x 64 x i1> @llvm.riscv.vlm.nxv64i1(ptr %0, iXLen %1)
   ret <vscale x 64 x i1> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll
index 47488d8c4293e..c56ee04fb6f60 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll
@@ -6,11 +6,11 @@
 
 declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i64(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   <vscale x 1 x i64>,
   i64);
 
-define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8>* %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -20,7 +20,7 @@ define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i64(
     <vscale x 1 x i8> undef,
-    <vscale x 1 x i8>* %0,
+    ptr %0,
     <vscale x 1 x i64> %1,
     i64 %2)
 
@@ -29,13 +29,13 @@ entry:
 
 declare <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
   i64,
   i64);
 
-define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -44,7 +44,7 @@ define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64(
     <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
+    ptr %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
     i64 %4, i64 1)
@@ -54,11 +54,11 @@ entry:
 
 declare <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i64(
   <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
+  ptr,
   <vscale x 2 x i64>,
   i64);
 
-define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8>* %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -68,7 +68,7 @@ define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i64(
     <vscale x 2 x i8> undef,
-    <vscale x 2 x i8>* %0,
+    ptr %0,
     <vscale x 2 x i64> %1,
     i64 %2)
 
@@ -77,13 +77,13 @@ entry:
 
 declare <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64(
   <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
+  ptr,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
   i64,
   i64);
 
-define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -92,7 +92,7 @@ define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64(
     <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
+    ptr %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
     i64 %4, i64 1)
@@ -102,11 +102,11 @@ entry:
 
 declare <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i64(
   <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
+  ptr,
   <vscale x 4 x i64>,
   i64);
 
-define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8>* %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -116,7 +116,7 @@ define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i64(
     <vscale x 4 x i8> undef,
-    <vscale x 4 x i8>* %0,
+    ptr %0,
     <vscale x 4 x i64> %1,
     i64 %2)
 
@@ -125,13 +125,13 @@ entry:
 
 declare <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64(
   <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
+  ptr,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
   i64,
   i64);
 
-define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -140,7 +140,7 @@ define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64(
     <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
+    ptr %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
     i64 %4, i64 1)
@@ -150,11 +150,11 @@ entry:
 
 declare <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i64(
   <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
+  ptr,
   <vscale x 8 x i64>,
   i64);
 
-define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8>* %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -164,7 +164,7 @@ define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i64(
     <vscale x 8 x i8> undef,
-    <vscale x 8 x i8>* %0,
+    ptr %0,
     <vscale x 8 x i64> %1,
     i64 %2)
 
@@ -173,13 +173,13 @@ entry:
 
 declare <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64(
   <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
+  ptr,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
   i64,
   i64);
 
-define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -188,7 +188,7 @@ define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64(
     <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
+    ptr %1,
     <vscale x 8 x i64> %2,
     <vscale x 8 x i1> %3,
     i64 %4, i64 1)
@@ -198,11 +198,11 @@ entry:
 
 declare <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i64(
   <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
+  ptr,
   <vscale x 1 x i64>,
   i64);
 
-define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16>* %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -212,7 +212,7 @@ define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i64(<vscale x
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i64(
     <vscale x 1 x i16> undef,
-    <vscale x 1 x i16>* %0,
+    ptr %0,
     <vscale x 1 x i64> %1,
     i64 %2)
 
@@ -221,13 +221,13 @@ entry:
 
 declare <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64(
   <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
+  ptr,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
   i64,
   i64);
 
-define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -236,7 +236,7 @@ define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i64(<vsca
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64(
     <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
+    ptr %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
     i64 %4, i64 1)
@@ -246,11 +246,11 @@ entry:
 
 declare <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i64(
   <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
+  ptr,
   <vscale x 2 x i64>,
   i64);
 
-define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16>* %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -260,7 +260,7 @@ define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i64(<vscale x
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i64(
     <vscale x 2 x i16> undef,
-    <vscale x 2 x i16>* %0,
+    ptr %0,
     <vscale x 2 x i64> %1,
     i64 %2)
 
@@ -269,13 +269,13 @@ entry:
 
 declare <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64(
   <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
+  ptr,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
   i64,
   i64);
 
-define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -284,7 +284,7 @@ define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i64(<vsca
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64(
     <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
+    ptr %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
     i64 %4, i64 1)
@@ -294,11 +294,11 @@ entry:
 
 declare <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i64(
   <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
+  ptr,
   <vscale x 4 x i64>,
   i64);
 
-define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16>* %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -308,7 +308,7 @@ define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i64(<vscale x
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i64(
     <vscale x 4 x i16> undef,
-    <vscale x 4 x i16>* %0,
+    ptr %0,
     <vscale x 4 x i64> %1,
     i64 %2)
 
@@ -317,13 +317,13 @@ entry:
 
 declare <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64(
   <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
+  ptr,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
   i64,
   i64);
 
-define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -332,7 +332,7 @@ define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i64(<vsca
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64(
     <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
+    ptr %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
     i64 %4, i64 1)
@@ -342,11 +342,11 @@ entry:
 
 declare <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i64(
   <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
+  ptr,
   <vscale x 8 x i64>,
   i64);
 
-define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16>* %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -356,7 +356,7 @@ define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i64(<vscale x
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i64(
     <vscale x 8 x i16> undef,
-    <vscale x 8 x i16>* %0,
+    ptr %0,
     <vscale x 8 x i64> %1,
     i64 %2)
 
@@ -365,13 +365,13 @@ entry:
 
 declare <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64(
   <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
+  ptr,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
   i64,
   i64);
 
-define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -380,7 +380,7 @@ define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i64(<vsca
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64(
     <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
+    ptr %1,
     <vscale x 8 x i64> %2,
     <vscale x 8 x i1> %3,
     i64 %4, i64 1)
@@ -390,11 +390,11 @@ entry:
 
 declare <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i64(
   <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
+  ptr,
   <vscale x 1 x i64>,
   i64);
 
-define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32>* %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -404,7 +404,7 @@ define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i64(<vscale x
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i64(
     <vscale x 1 x i32> undef,
-    <vscale x 1 x i32>* %0,
+    ptr %0,
     <vscale x 1 x i64> %1,
     i64 %2)
 
@@ -413,13 +413,13 @@ entry:
 
 declare <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64(
   <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
+  ptr,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
   i64,
   i64);
 
-define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -428,7 +428,7 @@ define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i64(<vsca
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
+    ptr %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
     i64 %4, i64 1)
@@ -438,11 +438,11 @@ entry:
 
 declare <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i64(
   <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
+  ptr,
   <vscale x 2 x i64>,
   i64);
 
-define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32>* %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -452,7 +452,7 @@ define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i64(<vscale x
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i64(
     <vscale x 2 x i32> undef,
-    <vscale x 2 x i32>* %0,
+    ptr %0,
     <vscale x 2 x i64> %1,
     i64 %2)
 
@@ -461,13 +461,13 @@ entry:
 
 declare <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64(
   <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
+  ptr,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
   i64,
   i64);
 
-define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -476,7 +476,7 @@ define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i64(<vsca
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
+    ptr %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
     i64 %4, i64 1)
@@ -486,11 +486,11 @@ entry:
 
 declare <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   <vscale x 4 x i64>,
   i64);
 
-define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32>* %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -500,7 +500,7 @@ define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i64(<vscale x
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64(
     <vscale x 4 x i32> undef,
-    <vscale x 4 x i32>* %0,
+    ptr %0,
     <vscale x 4 x i64> %1,
     i64 %2)
 
@@ -509,13 +509,13 @@ entry:
 
 declare <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
   i64,
   i64);
 
-define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -524,7 +524,7 @@ define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i64(<vsca
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
+    ptr %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
     i64 %4, i64 1)
@@ -534,11 +534,11 @@ entry:
 
 declare <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i64(
   <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
+  ptr,
   <vscale x 8 x i64>,
   i64);
 
-define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32>* %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -548,7 +548,7 @@ define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i64(<vscale x
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i64(
     <vscale x 8 x i32> undef,
-    <vscale x 8 x i32>* %0,
+    ptr %0,
     <vscale x 8 x i64> %1,
     i64 %2)
 
@@ -557,13 +557,13 @@ entry:
 
 declare <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64(
   <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
+  ptr,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
   i64,
   i64);
 
-define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -572,7 +572,7 @@ define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i64(<vsca
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
+    ptr %1,
     <vscale x 8 x i64> %2,
     <vscale x 8 x i1> %3,
     i64 %4, i64 1)
@@ -582,11 +582,11 @@ entry:
 
 declare <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   <vscale x 1 x i64>,
   i64);
 
-define <vscale x 1 x i64> @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64>* %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -595,7 +595,7 @@ define <vscale x 1 x i64> @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i64(<vscale x
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i64(
     <vscale x 1 x i64> undef,
-    <vscale x 1 x i64>* %0,
+    ptr %0,
     <vscale x 1 x i64> %1,
     i64 %2)
 
@@ -604,13 +604,13 @@ entry:
 
 declare <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
   i64,
   i64);
 
-define <vscale x 1 x i64> @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -619,7 +619,7 @@ define <vscale x 1 x i64> @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i64(<vsca
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
+    ptr %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
     i64 %4, i64 1)
@@ -629,11 +629,11 @@ entry:
 
 declare <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
+  ptr,
   <vscale x 2 x i64>,
   i64);
 
-define <vscale x 2 x i64> @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64>* %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -642,7 +642,7 @@ define <vscale x 2 x i64> @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i64(<vscale x
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i64(
     <vscale x 2 x i64> undef,
-    <vscale x 2 x i64>* %0,
+    ptr %0,
     <vscale x 2 x i64> %1,
     i64 %2)
 
@@ -651,13 +651,13 @@ entry:
 
 declare <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
+  ptr,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
   i64,
   i64);
 
-define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -666,7 +666,7 @@ define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i64(<vsca
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
+    ptr %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
     i64 %4, i64 1)
@@ -676,11 +676,11 @@ entry:
 
 declare <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
+  ptr,
   <vscale x 4 x i64>,
   i64);
 
-define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64>* %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -689,7 +689,7 @@ define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i64(<vscale x
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i64(
     <vscale x 4 x i64> undef,
-    <vscale x 4 x i64>* %0,
+    ptr %0,
     <vscale x 4 x i64> %1,
     i64 %2)
 
@@ -698,13 +698,13 @@ entry:
 
 declare <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
+  ptr,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
   i64,
   i64);
 
-define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -713,7 +713,7 @@ define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i64(<vsca
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
+    ptr %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
     i64 %4, i64 1)
@@ -723,11 +723,11 @@ entry:
 
 declare <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i64(
   <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
+  ptr,
   <vscale x 8 x i64>,
   i64);
 
-define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64>* %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -736,7 +736,7 @@ define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i64(<vscale x
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i64(
     <vscale x 8 x i64> undef,
-    <vscale x 8 x i64>* %0,
+    ptr %0,
     <vscale x 8 x i64> %1,
     i64 %2)
 
@@ -745,13 +745,13 @@ entry:
 
 declare <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64(
   <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
+  ptr,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
   i64,
   i64);
 
-define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -760,7 +760,7 @@ define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i64(<vsca
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
+    ptr %1,
     <vscale x 8 x i64> %2,
     <vscale x 8 x i1> %3,
     i64 %4, i64 1)
@@ -770,11 +770,11 @@ entry:
 
 declare <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i64(
   <vscale x 1 x half>,
-  <vscale x 1 x half>*,
+  ptr,
   <vscale x 1 x i64>,
   i64);
 
-define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half>* %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -784,7 +784,7 @@ define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i64(<vscale x
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i64(
     <vscale x 1 x half> undef,
-    <vscale x 1 x half>* %0,
+    ptr %0,
     <vscale x 1 x i64> %1,
     i64 %2)
 
@@ -793,13 +793,13 @@ entry:
 
 declare <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i64(
   <vscale x 1 x half>,
-  <vscale x 1 x half>*,
+  ptr,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
   i64,
   i64);
 
-define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -808,7 +808,7 @@ define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i64(<vsc
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i64(
     <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
+    ptr %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
     i64 %4, i64 1)
@@ -818,11 +818,11 @@ entry:
 
 declare <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i64(
   <vscale x 2 x half>,
-  <vscale x 2 x half>*,
+  ptr,
   <vscale x 2 x i64>,
   i64);
 
-define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half>* %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -832,7 +832,7 @@ define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i64(<vscale x
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i64(
     <vscale x 2 x half> undef,
-    <vscale x 2 x half>* %0,
+    ptr %0,
     <vscale x 2 x i64> %1,
     i64 %2)
 
@@ -841,13 +841,13 @@ entry:
 
 declare <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i64(
   <vscale x 2 x half>,
-  <vscale x 2 x half>*,
+  ptr,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
   i64,
   i64);
 
-define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -856,7 +856,7 @@ define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i64(<vsc
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i64(
     <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
+    ptr %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
     i64 %4, i64 1)
@@ -866,11 +866,11 @@ entry:
 
 declare <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i64(
   <vscale x 4 x half>,
-  <vscale x 4 x half>*,
+  ptr,
   <vscale x 4 x i64>,
   i64);
 
-define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half>* %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -880,7 +880,7 @@ define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i64(<vscale x
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i64(
     <vscale x 4 x half> undef,
-    <vscale x 4 x half>* %0,
+    ptr %0,
     <vscale x 4 x i64> %1,
     i64 %2)
 
@@ -889,13 +889,13 @@ entry:
 
 declare <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i64(
   <vscale x 4 x half>,
-  <vscale x 4 x half>*,
+  ptr,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
   i64,
   i64);
 
-define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -904,7 +904,7 @@ define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i64(<vsc
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i64(
     <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
+    ptr %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
     i64 %4, i64 1)
@@ -914,11 +914,11 @@ entry:
 
 declare <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i64(
   <vscale x 8 x half>,
-  <vscale x 8 x half>*,
+  ptr,
   <vscale x 8 x i64>,
   i64);
 
-define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half>* %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -928,7 +928,7 @@ define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i64(<vscale x
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i64(
     <vscale x 8 x half> undef,
-    <vscale x 8 x half>* %0,
+    ptr %0,
     <vscale x 8 x i64> %1,
     i64 %2)
 
@@ -937,13 +937,13 @@ entry:
 
 declare <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i64(
   <vscale x 8 x half>,
-  <vscale x 8 x half>*,
+  ptr,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
   i64,
   i64);
 
-define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -952,7 +952,7 @@ define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i64(<vsc
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i64(
     <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
+    ptr %1,
     <vscale x 8 x i64> %2,
     <vscale x 8 x i1> %3,
     i64 %4, i64 1)
@@ -962,11 +962,11 @@ entry:
 
 declare <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i64(
   <vscale x 1 x float>,
-  <vscale x 1 x float>*,
+  ptr,
   <vscale x 1 x i64>,
   i64);
 
-define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float>* %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -976,7 +976,7 @@ define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i64(<vscale
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i64(
     <vscale x 1 x float> undef,
-    <vscale x 1 x float>* %0,
+    ptr %0,
     <vscale x 1 x i64> %1,
     i64 %2)
 
@@ -985,13 +985,13 @@ entry:
 
 declare <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64(
   <vscale x 1 x float>,
-  <vscale x 1 x float>*,
+  ptr,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
   i64,
   i64);
 
-define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -1000,7 +1000,7 @@ define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i64(<vs
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64(
     <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
+    ptr %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
     i64 %4, i64 1)
@@ -1010,11 +1010,11 @@ entry:
 
 declare <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i64(
   <vscale x 2 x float>,
-  <vscale x 2 x float>*,
+  ptr,
   <vscale x 2 x i64>,
   i64);
 
-define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float>* %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1024,7 +1024,7 @@ define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i64(<vscale
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i64(
     <vscale x 2 x float> undef,
-    <vscale x 2 x float>* %0,
+    ptr %0,
     <vscale x 2 x i64> %1,
     i64 %2)
 
@@ -1033,13 +1033,13 @@ entry:
 
 declare <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64(
   <vscale x 2 x float>,
-  <vscale x 2 x float>*,
+  ptr,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
   i64,
   i64);
 
-define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -1048,7 +1048,7 @@ define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i64(<vs
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64(
     <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
+    ptr %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
     i64 %4, i64 1)
@@ -1058,11 +1058,11 @@ entry:
 
 declare <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i64(
   <vscale x 4 x float>,
-  <vscale x 4 x float>*,
+  ptr,
   <vscale x 4 x i64>,
   i64);
 
-define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float>* %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1072,7 +1072,7 @@ define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i64(<vscale
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i64(
     <vscale x 4 x float> undef,
-    <vscale x 4 x float>* %0,
+    ptr %0,
     <vscale x 4 x i64> %1,
     i64 %2)
 
@@ -1081,13 +1081,13 @@ entry:
 
 declare <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64(
   <vscale x 4 x float>,
-  <vscale x 4 x float>*,
+  ptr,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
   i64,
   i64);
 
-define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -1096,7 +1096,7 @@ define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i64(<vs
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64(
     <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
+    ptr %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
     i64 %4, i64 1)
@@ -1106,11 +1106,11 @@ entry:
 
 declare <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i64(
   <vscale x 8 x float>,
-  <vscale x 8 x float>*,
+  ptr,
   <vscale x 8 x i64>,
   i64);
 
-define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float>* %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1120,7 +1120,7 @@ define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i64(<vscale
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i64(
     <vscale x 8 x float> undef,
-    <vscale x 8 x float>* %0,
+    ptr %0,
     <vscale x 8 x i64> %1,
     i64 %2)
 
@@ -1129,13 +1129,13 @@ entry:
 
 declare <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64(
   <vscale x 8 x float>,
-  <vscale x 8 x float>*,
+  ptr,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
   i64,
   i64);
 
-define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -1144,7 +1144,7 @@ define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i64(<vs
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64(
     <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
+    ptr %1,
     <vscale x 8 x i64> %2,
     <vscale x 8 x i1> %3,
     i64 %4, i64 1)
@@ -1154,11 +1154,11 @@ entry:
 
 declare <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i64(
   <vscale x 1 x double>,
-  <vscale x 1 x double>*,
+  ptr,
   <vscale x 1 x i64>,
   i64);
 
-define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double>* %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -1167,7 +1167,7 @@ define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i64(<vscale
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i64(
     <vscale x 1 x double> undef,
-    <vscale x 1 x double>* %0,
+    ptr %0,
     <vscale x 1 x i64> %1,
     i64 %2)
 
@@ -1176,13 +1176,13 @@ entry:
 
 declare <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64(
   <vscale x 1 x double>,
-  <vscale x 1 x double>*,
+  ptr,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
   i64,
   i64);
 
-define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -1191,7 +1191,7 @@ define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i64(<v
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64(
     <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
+    ptr %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
     i64 %4, i64 1)
@@ -1201,11 +1201,11 @@ entry:
 
 declare <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i64(
   <vscale x 2 x double>,
-  <vscale x 2 x double>*,
+  ptr,
   <vscale x 2 x i64>,
   i64);
 
-define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double>* %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -1214,7 +1214,7 @@ define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i64(<vscale
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i64(
     <vscale x 2 x double> undef,
-    <vscale x 2 x double>* %0,
+    ptr %0,
     <vscale x 2 x i64> %1,
     i64 %2)
 
@@ -1223,13 +1223,13 @@ entry:
 
 declare <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64(
   <vscale x 2 x double>,
-  <vscale x 2 x double>*,
+  ptr,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
   i64,
   i64);
 
-define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -1238,7 +1238,7 @@ define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i64(<v
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64(
     <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
+    ptr %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
     i64 %4, i64 1)
@@ -1248,11 +1248,11 @@ entry:
 
 declare <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i64(
   <vscale x 4 x double>,
-  <vscale x 4 x double>*,
+  ptr,
   <vscale x 4 x i64>,
   i64);
 
-define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double>* %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -1261,7 +1261,7 @@ define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i64(<vscale
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i64(
     <vscale x 4 x double> undef,
-    <vscale x 4 x double>* %0,
+    ptr %0,
     <vscale x 4 x i64> %1,
     i64 %2)
 
@@ -1270,13 +1270,13 @@ entry:
 
 declare <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64(
   <vscale x 4 x double>,
-  <vscale x 4 x double>*,
+  ptr,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
   i64,
   i64);
 
-define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -1285,7 +1285,7 @@ define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i64(<v
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64(
     <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
+    ptr %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
     i64 %4, i64 1)
@@ -1295,11 +1295,11 @@ entry:
 
 declare <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i64(
   <vscale x 8 x double>,
-  <vscale x 8 x double>*,
+  ptr,
   <vscale x 8 x i64>,
   i64);
 
-define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double>* %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -1308,7 +1308,7 @@ define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i64(<vscale
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i64(
     <vscale x 8 x double> undef,
-    <vscale x 8 x double>* %0,
+    ptr %0,
     <vscale x 8 x i64> %1,
     i64 %2)
 
@@ -1317,13 +1317,13 @@ entry:
 
 declare <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64(
   <vscale x 8 x double>,
-  <vscale x 8 x double>*,
+  ptr,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
   i64,
   i64);
 
-define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -1332,7 +1332,7 @@ define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i64(<v
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64(
     <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
+    ptr %1,
     <vscale x 8 x i64> %2,
     <vscale x 8 x i1> %3,
     i64 %4, i64 1)

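The edit in these RISC-V vloxei tests is the same mechanical rewrite seen throughout the files above and below: every typed pointer operand in the intrinsic declarations and call sites becomes the opaque ptr type, and no information is lost because the data and index types are still mangled into the intrinsic's overloaded name. A minimal sketch of the pattern, taken from the first declaration in vloxei.ll below (iXLen is that test's placeholder for the target's XLEN integer type):

  ; before: the pointee type is spelled out in the pointer operand
  declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i32(
    <vscale x 1 x i8>, <vscale x 1 x i8>*, <vscale x 1 x i32>, iXLen)

  ; after: opaque pointer; the .nxv1i8.nxv1i32 suffix still carries the types
  declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i32(
    <vscale x 1 x i8>, ptr, <vscale x 1 x i32>, iXLen)
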
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vloxei.ll b/llvm/test/CodeGen/RISCV/rvv/vloxei.ll
index 9c562e0d5ab2a..8f0141526a62b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vloxei.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vloxei.ll
@@ -6,11 +6,11 @@
 
 declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i32(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   <vscale x 1 x i32>,
   iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8>* %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -20,7 +20,7 @@ define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i32(
     <vscale x 1 x i8> undef,
-    <vscale x 1 x i8>* %0,
+    ptr %0,
     <vscale x 1 x i32> %1,
     iXLen %2)
 
@@ -29,13 +29,13 @@ entry:
 
 declare <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -44,7 +44,7 @@ define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32(
     <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
+    ptr %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -54,11 +54,11 @@ entry:
 
 declare <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i32(
   <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
+  ptr,
   <vscale x 2 x i32>,
   iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8>* %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -68,7 +68,7 @@ define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i32(
     <vscale x 2 x i8> undef,
-    <vscale x 2 x i8>* %0,
+    ptr %0,
     <vscale x 2 x i32> %1,
     iXLen %2)
 
@@ -77,13 +77,13 @@ entry:
 
 declare <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32(
   <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
+  ptr,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -92,7 +92,7 @@ define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32(
     <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
+    ptr %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -102,11 +102,11 @@ entry:
 
 declare <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i32(
   <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
+  ptr,
   <vscale x 4 x i32>,
   iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8>* %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -116,7 +116,7 @@ define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i32(
     <vscale x 4 x i8> undef,
-    <vscale x 4 x i8>* %0,
+    ptr %0,
     <vscale x 4 x i32> %1,
     iXLen %2)
 
@@ -125,13 +125,13 @@ entry:
 
 declare <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32(
   <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
+  ptr,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -140,7 +140,7 @@ define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32(
     <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
+    ptr %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -150,11 +150,11 @@ entry:
 
 declare <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i32(
   <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
+  ptr,
   <vscale x 8 x i32>,
   iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8>* %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -164,7 +164,7 @@ define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i32(
     <vscale x 8 x i8> undef,
-    <vscale x 8 x i8>* %0,
+    ptr %0,
     <vscale x 8 x i32> %1,
     iXLen %2)
 
@@ -173,13 +173,13 @@ entry:
 
 declare <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32(
   <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
+  ptr,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -188,7 +188,7 @@ define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32(
     <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
+    ptr %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)
@@ -198,11 +198,11 @@ entry:
 
 declare <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i32(
   <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
+  ptr,
   <vscale x 16 x i32>,
   iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8>* %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -212,7 +212,7 @@ define <vscale x 16 x i8> @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i32(<vscale x
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i32(
     <vscale x 16 x i8> undef,
-    <vscale x 16 x i8>* %0,
+    ptr %0,
     <vscale x 16 x i32> %1,
     iXLen %2)
 
@@ -221,13 +221,13 @@ entry:
 
 declare <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32(
   <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
+  ptr,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -236,7 +236,7 @@ define <vscale x 16 x i8> @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vsc
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32(
     <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
+    ptr %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
     iXLen %4, iXLen 1)
@@ -246,11 +246,11 @@ entry:
 
 declare <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i32(
   <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
+  ptr,
   <vscale x 1 x i32>,
   iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16>* %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -260,7 +260,7 @@ define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i32(<vscale x
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i32(
     <vscale x 1 x i16> undef,
-    <vscale x 1 x i16>* %0,
+    ptr %0,
     <vscale x 1 x i32> %1,
     iXLen %2)
 
@@ -269,13 +269,13 @@ entry:
 
 declare <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32(
   <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
+  ptr,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -284,7 +284,7 @@ define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vsca
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
+    ptr %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -294,11 +294,11 @@ entry:
 
 declare <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i32(
   <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
+  ptr,
   <vscale x 2 x i32>,
   iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16>* %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -308,7 +308,7 @@ define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i32(<vscale x
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i32(
     <vscale x 2 x i16> undef,
-    <vscale x 2 x i16>* %0,
+    ptr %0,
     <vscale x 2 x i32> %1,
     iXLen %2)
 
@@ -317,13 +317,13 @@ entry:
 
 declare <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32(
   <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
+  ptr,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -332,7 +332,7 @@ define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vsca
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
+    ptr %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -342,11 +342,11 @@ entry:
 
 declare <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i32(
   <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
+  ptr,
   <vscale x 4 x i32>,
   iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16>* %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -356,7 +356,7 @@ define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i32(<vscale x
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i32(
     <vscale x 4 x i16> undef,
-    <vscale x 4 x i16>* %0,
+    ptr %0,
     <vscale x 4 x i32> %1,
     iXLen %2)
 
@@ -365,13 +365,13 @@ entry:
 
 declare <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32(
   <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
+  ptr,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -380,7 +380,7 @@ define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vsca
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
+    ptr %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -390,11 +390,11 @@ entry:
 
 declare <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i32(
   <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
+  ptr,
   <vscale x 8 x i32>,
   iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16>* %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -404,7 +404,7 @@ define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i32(<vscale x
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i32(
     <vscale x 8 x i16> undef,
-    <vscale x 8 x i16>* %0,
+    ptr %0,
     <vscale x 8 x i32> %1,
     iXLen %2)
 
@@ -413,13 +413,13 @@ entry:
 
 declare <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32(
   <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
+  ptr,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -428,7 +428,7 @@ define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vsca
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
+    ptr %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)
@@ -438,11 +438,11 @@ entry:
 
 declare <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i32(
   <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
+  ptr,
   <vscale x 16 x i32>,
   iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16>* %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -452,7 +452,7 @@ define <vscale x 16 x i16> @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i32(<vscal
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i32(
     <vscale x 16 x i16> undef,
-    <vscale x 16 x i16>* %0,
+    ptr %0,
     <vscale x 16 x i32> %1,
     iXLen %2)
 
@@ -461,13 +461,13 @@ entry:
 
 declare <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32(
   <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
+  ptr,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -476,7 +476,7 @@ define <vscale x 16 x i16> @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
+    ptr %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
     iXLen %4, iXLen 1)
@@ -486,11 +486,11 @@ entry:
 
 declare <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
+  ptr,
   <vscale x 1 x i32>,
   iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32>* %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -499,7 +499,7 @@ define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i32(<vscale x
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32(
     <vscale x 1 x i32> undef,
-    <vscale x 1 x i32>* %0,
+    ptr %0,
     <vscale x 1 x i32> %1,
     iXLen %2)
 
@@ -508,13 +508,13 @@ entry:
 
 declare <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
+  ptr,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -523,7 +523,7 @@ define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vsca
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
+    ptr %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -533,11 +533,11 @@ entry:
 
 declare <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
+  ptr,
   <vscale x 2 x i32>,
   iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32>* %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -546,7 +546,7 @@ define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i32(<vscale x
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i32(
     <vscale x 2 x i32> undef,
-    <vscale x 2 x i32>* %0,
+    ptr %0,
     <vscale x 2 x i32> %1,
     iXLen %2)
 
@@ -555,13 +555,13 @@ entry:
 
 declare <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
+  ptr,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -570,7 +570,7 @@ define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vsca
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
+    ptr %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -580,11 +580,11 @@ entry:
 
 declare <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   <vscale x 4 x i32>,
   iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32>* %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -593,7 +593,7 @@ define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i32(<vscale x
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i32(
     <vscale x 4 x i32> undef,
-    <vscale x 4 x i32>* %0,
+    ptr %0,
     <vscale x 4 x i32> %1,
     iXLen %2)
 
@@ -602,13 +602,13 @@ entry:
 
 declare <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -617,7 +617,7 @@ define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vsca
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
+    ptr %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -627,11 +627,11 @@ entry:
 
 declare <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
+  ptr,
   <vscale x 8 x i32>,
   iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32>* %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -640,7 +640,7 @@ define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i32(<vscale x
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i32(
     <vscale x 8 x i32> undef,
-    <vscale x 8 x i32>* %0,
+    ptr %0,
     <vscale x 8 x i32> %1,
     iXLen %2)
 
@@ -649,13 +649,13 @@ entry:
 
 declare <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
+  ptr,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -664,7 +664,7 @@ define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vsca
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
+    ptr %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)
@@ -674,11 +674,11 @@ entry:
 
 declare <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
+  ptr,
   <vscale x 16 x i32>,
   iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32>* %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -687,7 +687,7 @@ define <vscale x 16 x i32> @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i32(<vscal
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i32(
     <vscale x 16 x i32> undef,
-    <vscale x 16 x i32>* %0,
+    ptr %0,
     <vscale x 16 x i32> %1,
     iXLen %2)
 
@@ -696,13 +696,13 @@ entry:
 
 declare <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
+  ptr,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -711,7 +711,7 @@ define <vscale x 16 x i32> @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
+    ptr %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
     iXLen %4, iXLen 1)
@@ -721,11 +721,11 @@ entry:
 
 declare <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i32(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   <vscale x 1 x i32>,
   iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64>* %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -735,7 +735,7 @@ define <vscale x 1 x i64> @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i32(<vscale x
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i32(
     <vscale x 1 x i64> undef,
-    <vscale x 1 x i64>* %0,
+    ptr %0,
     <vscale x 1 x i32> %1,
     iXLen %2)
 
@@ -744,13 +744,13 @@ entry:
 
 declare <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -759,7 +759,7 @@ define <vscale x 1 x i64> @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vsca
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
+    ptr %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -769,11 +769,11 @@ entry:
 
 declare <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i32(
   <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
+  ptr,
   <vscale x 2 x i32>,
   iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64>* %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -783,7 +783,7 @@ define <vscale x 2 x i64> @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i32(<vscale x
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i32(
     <vscale x 2 x i64> undef,
-    <vscale x 2 x i64>* %0,
+    ptr %0,
     <vscale x 2 x i32> %1,
     iXLen %2)
 
@@ -792,13 +792,13 @@ entry:
 
 declare <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32(
   <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
+  ptr,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -807,7 +807,7 @@ define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vsca
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
+    ptr %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -817,11 +817,11 @@ entry:
 
 declare <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i32(
   <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
+  ptr,
   <vscale x 4 x i32>,
   iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64>* %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -831,7 +831,7 @@ define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i32(<vscale x
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i32(
     <vscale x 4 x i64> undef,
-    <vscale x 4 x i64>* %0,
+    ptr %0,
     <vscale x 4 x i32> %1,
     iXLen %2)
 
@@ -840,13 +840,13 @@ entry:
 
 declare <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32(
   <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
+  ptr,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -855,7 +855,7 @@ define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vsca
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
+    ptr %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -865,11 +865,11 @@ entry:
 
 declare <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i32(
   <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
+  ptr,
   <vscale x 8 x i32>,
   iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64>* %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -879,7 +879,7 @@ define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i32(<vscale x
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i32(
     <vscale x 8 x i64> undef,
-    <vscale x 8 x i64>* %0,
+    ptr %0,
     <vscale x 8 x i32> %1,
     iXLen %2)
 
@@ -888,13 +888,13 @@ entry:
 
 declare <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32(
   <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
+  ptr,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -903,7 +903,7 @@ define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vsca
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
+    ptr %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)
@@ -913,11 +913,11 @@ entry:
 
 declare <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i32(
   <vscale x 1 x half>,
-  <vscale x 1 x half>*,
+  ptr,
   <vscale x 1 x i32>,
   iXLen);
 
-define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half>* %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -927,7 +927,7 @@ define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i32(<vscale x
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i32(
     <vscale x 1 x half> undef,
-    <vscale x 1 x half>* %0,
+    ptr %0,
     <vscale x 1 x i32> %1,
     iXLen %2)
 
@@ -936,13 +936,13 @@ entry:
 
 declare <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i32(
   <vscale x 1 x half>,
-  <vscale x 1 x half>*,
+  ptr,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -951,7 +951,7 @@ define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vsc
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i32(
     <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
+    ptr %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -961,11 +961,11 @@ entry:
 
 declare <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i32(
   <vscale x 2 x half>,
-  <vscale x 2 x half>*,
+  ptr,
   <vscale x 2 x i32>,
   iXLen);
 
-define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half>* %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -975,7 +975,7 @@ define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i32(<vscale x
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i32(
     <vscale x 2 x half> undef,
-    <vscale x 2 x half>* %0,
+    ptr %0,
     <vscale x 2 x i32> %1,
     iXLen %2)
 
@@ -984,13 +984,13 @@ entry:
 
 declare <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i32(
   <vscale x 2 x half>,
-  <vscale x 2 x half>*,
+  ptr,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -999,7 +999,7 @@ define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vsc
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i32(
     <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
+    ptr %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1009,11 +1009,11 @@ entry:
 
 declare <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i32(
   <vscale x 4 x half>,
-  <vscale x 4 x half>*,
+  ptr,
   <vscale x 4 x i32>,
   iXLen);
 
-define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half>* %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1023,7 +1023,7 @@ define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i32(<vscale x
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i32(
     <vscale x 4 x half> undef,
-    <vscale x 4 x half>* %0,
+    ptr %0,
     <vscale x 4 x i32> %1,
     iXLen %2)
 
@@ -1032,13 +1032,13 @@ entry:
 
 declare <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i32(
   <vscale x 4 x half>,
-  <vscale x 4 x half>*,
+  ptr,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1047,7 +1047,7 @@ define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vsc
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i32(
     <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
+    ptr %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1057,11 +1057,11 @@ entry:
 
 declare <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i32(
   <vscale x 8 x half>,
-  <vscale x 8 x half>*,
+  ptr,
   <vscale x 8 x i32>,
   iXLen);
 
-define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half>* %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1071,7 +1071,7 @@ define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i32(<vscale x
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i32(
     <vscale x 8 x half> undef,
-    <vscale x 8 x half>* %0,
+    ptr %0,
     <vscale x 8 x i32> %1,
     iXLen %2)
 
@@ -1080,13 +1080,13 @@ entry:
 
 declare <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i32(
   <vscale x 8 x half>,
-  <vscale x 8 x half>*,
+  ptr,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1095,7 +1095,7 @@ define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vsc
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i32(
     <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
+    ptr %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1105,11 +1105,11 @@ entry:
 
 declare <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i32(
   <vscale x 16 x half>,
-  <vscale x 16 x half>*,
+  ptr,
   <vscale x 16 x i32>,
   iXLen);
 
-define <vscale x 16 x half> @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half>* %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1119,7 +1119,7 @@ define <vscale x 16 x half> @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i32(<vsca
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i32(
     <vscale x 16 x half> undef,
-    <vscale x 16 x half>* %0,
+    ptr %0,
     <vscale x 16 x i32> %1,
     iXLen %2)
 
@@ -1128,13 +1128,13 @@ entry:
 
 declare <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i32(
   <vscale x 16 x half>,
-  <vscale x 16 x half>*,
+  ptr,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 16 x half> @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1143,7 +1143,7 @@ define <vscale x 16 x half> @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i32(
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i32(
     <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
+    ptr %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1153,11 +1153,11 @@ entry:
 
 declare <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i32(
   <vscale x 1 x float>,
-  <vscale x 1 x float>*,
+  ptr,
   <vscale x 1 x i32>,
   iXLen);
 
-define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float>* %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1166,7 +1166,7 @@ define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i32(<vscale
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i32(
     <vscale x 1 x float> undef,
-    <vscale x 1 x float>* %0,
+    ptr %0,
     <vscale x 1 x i32> %1,
     iXLen %2)
 
@@ -1175,13 +1175,13 @@ entry:
 
 declare <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32(
   <vscale x 1 x float>,
-  <vscale x 1 x float>*,
+  ptr,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -1190,7 +1190,7 @@ define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vs
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32(
     <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
+    ptr %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1200,11 +1200,11 @@ entry:
 
 declare <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i32(
   <vscale x 2 x float>,
-  <vscale x 2 x float>*,
+  ptr,
   <vscale x 2 x i32>,
   iXLen);
 
-define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float>* %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1213,7 +1213,7 @@ define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i32(<vscale
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i32(
     <vscale x 2 x float> undef,
-    <vscale x 2 x float>* %0,
+    ptr %0,
     <vscale x 2 x i32> %1,
     iXLen %2)
 
@@ -1222,13 +1222,13 @@ entry:
 
 declare <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32(
   <vscale x 2 x float>,
-  <vscale x 2 x float>*,
+  ptr,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -1237,7 +1237,7 @@ define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vs
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32(
     <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
+    ptr %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1247,11 +1247,11 @@ entry:
 
 declare <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i32(
   <vscale x 4 x float>,
-  <vscale x 4 x float>*,
+  ptr,
   <vscale x 4 x i32>,
   iXLen);
 
-define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float>* %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1260,7 +1260,7 @@ define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i32(<vscale
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i32(
     <vscale x 4 x float> undef,
-    <vscale x 4 x float>* %0,
+    ptr %0,
     <vscale x 4 x i32> %1,
     iXLen %2)
 
@@ -1269,13 +1269,13 @@ entry:
 
 declare <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32(
   <vscale x 4 x float>,
-  <vscale x 4 x float>*,
+  ptr,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -1284,7 +1284,7 @@ define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vs
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32(
     <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
+    ptr %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1294,11 +1294,11 @@ entry:
 
 declare <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i32(
   <vscale x 8 x float>,
-  <vscale x 8 x float>*,
+  ptr,
   <vscale x 8 x i32>,
   iXLen);
 
-define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float>* %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1307,7 +1307,7 @@ define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i32(<vscale
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i32(
     <vscale x 8 x float> undef,
-    <vscale x 8 x float>* %0,
+    ptr %0,
     <vscale x 8 x i32> %1,
     iXLen %2)
 
@@ -1316,13 +1316,13 @@ entry:
 
 declare <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32(
   <vscale x 8 x float>,
-  <vscale x 8 x float>*,
+  ptr,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -1331,7 +1331,7 @@ define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vs
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32(
     <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
+    ptr %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1341,11 +1341,11 @@ entry:
 
 declare <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i32(
   <vscale x 16 x float>,
-  <vscale x 16 x float>*,
+  ptr,
   <vscale x 16 x i32>,
   iXLen);
 
-define <vscale x 16 x float> @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float>* %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -1354,7 +1354,7 @@ define <vscale x 16 x float> @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i32(<vsc
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i32(
     <vscale x 16 x float> undef,
-    <vscale x 16 x float>* %0,
+    ptr %0,
     <vscale x 16 x i32> %1,
     iXLen %2)
 
@@ -1363,13 +1363,13 @@ entry:
 
 declare <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32(
   <vscale x 16 x float>,
-  <vscale x 16 x float>*,
+  ptr,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 16 x float> @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -1378,7 +1378,7 @@ define <vscale x 16 x float> @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i32
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32(
     <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
+    ptr %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1388,11 +1388,11 @@ entry:
 
 declare <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i32(
   <vscale x 1 x double>,
-  <vscale x 1 x double>*,
+  ptr,
   <vscale x 1 x i32>,
   iXLen);
 
-define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double>* %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -1402,7 +1402,7 @@ define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i32(<vscale
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i32(
     <vscale x 1 x double> undef,
-    <vscale x 1 x double>* %0,
+    ptr %0,
     <vscale x 1 x i32> %1,
     iXLen %2)
 
@@ -1411,13 +1411,13 @@ entry:
 
 declare <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32(
   <vscale x 1 x double>,
-  <vscale x 1 x double>*,
+  ptr,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -1426,7 +1426,7 @@ define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<v
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32(
     <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
+    ptr %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1436,11 +1436,11 @@ entry:
 
 declare <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i32(
   <vscale x 2 x double>,
-  <vscale x 2 x double>*,
+  ptr,
   <vscale x 2 x i32>,
   iXLen);
 
-define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double>* %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -1450,7 +1450,7 @@ define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i32(<vscale
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i32(
     <vscale x 2 x double> undef,
-    <vscale x 2 x double>* %0,
+    ptr %0,
     <vscale x 2 x i32> %1,
     iXLen %2)
 
@@ -1459,13 +1459,13 @@ entry:
 
 declare <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32(
   <vscale x 2 x double>,
-  <vscale x 2 x double>*,
+  ptr,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -1474,7 +1474,7 @@ define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<v
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32(
     <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
+    ptr %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1484,11 +1484,11 @@ entry:
 
 declare <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i32(
   <vscale x 4 x double>,
-  <vscale x 4 x double>*,
+  ptr,
   <vscale x 4 x i32>,
   iXLen);
 
-define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double>* %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -1498,7 +1498,7 @@ define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i32(<vscale
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i32(
     <vscale x 4 x double> undef,
-    <vscale x 4 x double>* %0,
+    ptr %0,
     <vscale x 4 x i32> %1,
     iXLen %2)
 
@@ -1507,13 +1507,13 @@ entry:
 
 declare <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32(
   <vscale x 4 x double>,
-  <vscale x 4 x double>*,
+  ptr,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -1522,7 +1522,7 @@ define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<v
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32(
     <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
+    ptr %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1532,11 +1532,11 @@ entry:
 
 declare <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i32(
   <vscale x 8 x double>,
-  <vscale x 8 x double>*,
+  ptr,
   <vscale x 8 x i32>,
   iXLen);
 
-define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double>* %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -1546,7 +1546,7 @@ define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i32(<vscale
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i32(
     <vscale x 8 x double> undef,
-    <vscale x 8 x double>* %0,
+    ptr %0,
     <vscale x 8 x i32> %1,
     iXLen %2)
 
@@ -1555,13 +1555,13 @@ entry:
 
 declare <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32(
   <vscale x 8 x double>,
-  <vscale x 8 x double>*,
+  ptr,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -1570,7 +1570,7 @@ define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<v
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32(
     <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
+    ptr %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1580,11 +1580,11 @@ entry:
 
 declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i16(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   <vscale x 1 x i16>,
   iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8>* %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -1594,7 +1594,7 @@ define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i16(
     <vscale x 1 x i8> undef,
-    <vscale x 1 x i8>* %0,
+    ptr %0,
     <vscale x 1 x i16> %1,
     iXLen %2)
 
@@ -1603,13 +1603,13 @@ entry:
 
 declare <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -1618,7 +1618,7 @@ define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
+    ptr %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1628,11 +1628,11 @@ entry:
 
 declare <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i16(
   <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
+  ptr,
   <vscale x 2 x i16>,
   iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8>* %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -1642,7 +1642,7 @@ define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i16(
     <vscale x 2 x i8> undef,
-    <vscale x 2 x i8>* %0,
+    ptr %0,
     <vscale x 2 x i16> %1,
     iXLen %2)
 
@@ -1651,13 +1651,13 @@ entry:
 
 declare <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16(
   <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
+  ptr,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -1666,7 +1666,7 @@ define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
+    ptr %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1676,11 +1676,11 @@ entry:
 
 declare <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i16(
   <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
+  ptr,
   <vscale x 4 x i16>,
   iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8>* %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -1690,7 +1690,7 @@ define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i16(
     <vscale x 4 x i8> undef,
-    <vscale x 4 x i8>* %0,
+    ptr %0,
     <vscale x 4 x i16> %1,
     iXLen %2)
 
@@ -1699,13 +1699,13 @@ entry:
 
 declare <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16(
   <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
+  ptr,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -1714,7 +1714,7 @@ define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
+    ptr %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1724,11 +1724,11 @@ entry:
 
 declare <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i16(
   <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
+  ptr,
   <vscale x 8 x i16>,
   iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8>* %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -1738,7 +1738,7 @@ define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i16(
     <vscale x 8 x i8> undef,
-    <vscale x 8 x i8>* %0,
+    ptr %0,
     <vscale x 8 x i16> %1,
     iXLen %2)
 
@@ -1747,13 +1747,13 @@ entry:
 
 declare <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16(
   <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
+  ptr,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -1762,7 +1762,7 @@ define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
+    ptr %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1772,11 +1772,11 @@ entry:
 
 declare <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i16(
   <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
+  ptr,
   <vscale x 16 x i16>,
   iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8>* %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -1786,7 +1786,7 @@ define <vscale x 16 x i8> @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i16(<vscale x
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i16(
     <vscale x 16 x i8> undef,
-    <vscale x 16 x i8>* %0,
+    ptr %0,
     <vscale x 16 x i16> %1,
     iXLen %2)
 
@@ -1795,13 +1795,13 @@ entry:
 
 declare <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16(
   <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
+  ptr,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -1810,7 +1810,7 @@ define <vscale x 16 x i8> @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vsc
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
+    ptr %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1820,11 +1820,11 @@ entry:
 
 declare <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i16(
   <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
+  ptr,
   <vscale x 32 x i16>,
   iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8>* %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i16(ptr %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -1834,7 +1834,7 @@ define <vscale x 32 x i8> @intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i16(<vscale x
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i16(
     <vscale x 32 x i8> undef,
-    <vscale x 32 x i8>* %0,
+    ptr %0,
     <vscale x 32 x i16> %1,
     iXLen %2)
 
@@ -1843,13 +1843,13 @@ entry:
 
 declare <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16(
   <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
+  ptr,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -1858,7 +1858,7 @@ define <vscale x 32 x i8> @intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vsc
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
-    <vscale x 32 x i8>* %1,
+    ptr %1,
     <vscale x 32 x i16> %2,
     <vscale x 32 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1868,11 +1868,11 @@ entry:
 
 declare <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
+  ptr,
   <vscale x 1 x i16>,
   iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16>* %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1881,7 +1881,7 @@ define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i16(<vscale x
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i16(
     <vscale x 1 x i16> undef,
-    <vscale x 1 x i16>* %0,
+    ptr %0,
     <vscale x 1 x i16> %1,
     iXLen %2)
 
@@ -1890,13 +1890,13 @@ entry:
 
 declare <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
+  ptr,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1905,7 +1905,7 @@ define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vsca
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
+    ptr %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1915,11 +1915,11 @@ entry:
 
 declare <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
+  ptr,
   <vscale x 2 x i16>,
   iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16>* %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1928,7 +1928,7 @@ define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i16(<vscale x
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i16(
     <vscale x 2 x i16> undef,
-    <vscale x 2 x i16>* %0,
+    ptr %0,
     <vscale x 2 x i16> %1,
     iXLen %2)
 
@@ -1937,13 +1937,13 @@ entry:
 
 declare <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
+  ptr,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1952,7 +1952,7 @@ define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vsca
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
+    ptr %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1962,11 +1962,11 @@ entry:
 
 declare <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
+  ptr,
   <vscale x 4 x i16>,
   iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16>* %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1975,7 +1975,7 @@ define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i16(<vscale x
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i16(
     <vscale x 4 x i16> undef,
-    <vscale x 4 x i16>* %0,
+    ptr %0,
     <vscale x 4 x i16> %1,
     iXLen %2)
 
@@ -1984,13 +1984,13 @@ entry:
 
 declare <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
+  ptr,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1999,7 +1999,7 @@ define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vsca
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
+    ptr %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -2009,11 +2009,11 @@ entry:
 
 declare <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
+  ptr,
   <vscale x 8 x i16>,
   iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16>* %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -2022,7 +2022,7 @@ define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i16(<vscale x
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i16(
     <vscale x 8 x i16> undef,
-    <vscale x 8 x i16>* %0,
+    ptr %0,
     <vscale x 8 x i16> %1,
     iXLen %2)
 
@@ -2031,13 +2031,13 @@ entry:
 
 declare <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
+  ptr,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -2046,7 +2046,7 @@ define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vsca
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
+    ptr %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)
@@ -2056,11 +2056,11 @@ entry:
 
 declare <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
+  ptr,
   <vscale x 16 x i16>,
   iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16>* %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -2069,7 +2069,7 @@ define <vscale x 16 x i16> @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i16(<vscal
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i16(
     <vscale x 16 x i16> undef,
-    <vscale x 16 x i16>* %0,
+    ptr %0,
     <vscale x 16 x i16> %1,
     iXLen %2)
 
@@ -2078,13 +2078,13 @@ entry:
 
 declare <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
+  ptr,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -2093,7 +2093,7 @@ define <vscale x 16 x i16> @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
+    ptr %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
     iXLen %4, iXLen 1)
@@ -2103,11 +2103,11 @@ entry:
 
 declare <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
+  ptr,
   <vscale x 32 x i16>,
   iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16>* %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i16(ptr %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -2116,7 +2116,7 @@ define <vscale x 32 x i16> @intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i16(<vscal
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i16(
     <vscale x 32 x i16> undef,
-    <vscale x 32 x i16>* %0,
+    ptr %0,
     <vscale x 32 x i16> %1,
     iXLen %2)
 
@@ -2125,13 +2125,13 @@ entry:
 
 declare <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
+  ptr,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -2140,7 +2140,7 @@ define <vscale x 32 x i16> @intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
-    <vscale x 32 x i16>* %1,
+    ptr %1,
     <vscale x 32 x i16> %2,
     <vscale x 32 x i1> %3,
     iXLen %4, iXLen 1)
@@ -2150,11 +2150,11 @@ entry:
 
 declare <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i16(
   <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
+  ptr,
   <vscale x 1 x i16>,
   iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32>* %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -2164,7 +2164,7 @@ define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i16(
     <vscale x 1 x i32> undef,
-    <vscale x 1 x i32>* %0,
+    ptr %0,
     <vscale x 1 x i16> %1,
     iXLen %2)
 
@@ -2173,13 +2173,13 @@ entry:
 
 declare <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16(
   <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
+  ptr,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -2188,7 +2188,7 @@ define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vsca
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
+    ptr %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -2198,11 +2198,11 @@ entry:
 
 declare <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i16(
   <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
+  ptr,
   <vscale x 2 x i16>,
   iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32>* %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -2212,7 +2212,7 @@ define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i16(<vscale x
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i16(
     <vscale x 2 x i32> undef,
-    <vscale x 2 x i32>* %0,
+    ptr %0,
     <vscale x 2 x i16> %1,
     iXLen %2)
 
@@ -2221,13 +2221,13 @@ entry:
 
 declare <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16(
   <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
+  ptr,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -2236,7 +2236,7 @@ define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vsca
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
+    ptr %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -2246,11 +2246,11 @@ entry:
 
 declare <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i16(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   <vscale x 4 x i16>,
   iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32>* %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -2260,7 +2260,7 @@ define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i16(<vscale x
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i16(
     <vscale x 4 x i32> undef,
-    <vscale x 4 x i32>* %0,
+    ptr %0,
     <vscale x 4 x i16> %1,
     iXLen %2)
 
@@ -2269,13 +2269,13 @@ entry:
 
 declare <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -2284,7 +2284,7 @@ define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vsca
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
+    ptr %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -2294,11 +2294,11 @@ entry:
 
 declare <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i16(
   <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
+  ptr,
   <vscale x 8 x i16>,
   iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32>* %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -2308,7 +2308,7 @@ define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i16(<vscale x
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i16(
     <vscale x 8 x i32> undef,
-    <vscale x 8 x i32>* %0,
+    ptr %0,
     <vscale x 8 x i16> %1,
     iXLen %2)
 
@@ -2317,13 +2317,13 @@ entry:
 
 declare <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16(
   <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
+  ptr,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -2332,7 +2332,7 @@ define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vsca
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
+    ptr %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)
@@ -2342,11 +2342,11 @@ entry:
 
 declare <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i16(
   <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
+  ptr,
   <vscale x 16 x i16>,
   iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32>* %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -2356,7 +2356,7 @@ define <vscale x 16 x i32> @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i16(<vscal
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i16(
     <vscale x 16 x i32> undef,
-    <vscale x 16 x i32>* %0,
+    ptr %0,
     <vscale x 16 x i16> %1,
     iXLen %2)
 
@@ -2365,13 +2365,13 @@ entry:
 
 declare <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16(
   <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
+  ptr,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -2380,7 +2380,7 @@ define <vscale x 16 x i32> @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
+    ptr %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
     iXLen %4, iXLen 1)
@@ -2390,11 +2390,11 @@ entry:
 
 declare <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i16(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   <vscale x 1 x i16>,
   iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64>* %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -2404,7 +2404,7 @@ define <vscale x 1 x i64> @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i16(<vscale x
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i16(
     <vscale x 1 x i64> undef,
-    <vscale x 1 x i64>* %0,
+    ptr %0,
     <vscale x 1 x i16> %1,
     iXLen %2)
 
@@ -2413,13 +2413,13 @@ entry:
 
 declare <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -2428,7 +2428,7 @@ define <vscale x 1 x i64> @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vsca
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
+    ptr %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -2438,11 +2438,11 @@ entry:
 
 declare <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i16(
   <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
+  ptr,
   <vscale x 2 x i16>,
   iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64>* %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -2452,7 +2452,7 @@ define <vscale x 2 x i64> @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i16(<vscale x
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i16(
     <vscale x 2 x i64> undef,
-    <vscale x 2 x i64>* %0,
+    ptr %0,
     <vscale x 2 x i16> %1,
     iXLen %2)
 
@@ -2461,13 +2461,13 @@ entry:
 
 declare <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16(
   <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
+  ptr,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -2476,7 +2476,7 @@ define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vsca
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
+    ptr %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -2486,11 +2486,11 @@ entry:
 
 declare <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i16(
   <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
+  ptr,
   <vscale x 4 x i16>,
   iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64>* %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -2500,7 +2500,7 @@ define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i16(<vscale x
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i16(
     <vscale x 4 x i64> undef,
-    <vscale x 4 x i64>* %0,
+    ptr %0,
     <vscale x 4 x i16> %1,
     iXLen %2)
 
@@ -2509,13 +2509,13 @@ entry:
 
 declare <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16(
   <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
+  ptr,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -2524,7 +2524,7 @@ define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vsca
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
+    ptr %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -2534,11 +2534,11 @@ entry:
 
 declare <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i16(
   <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
+  ptr,
   <vscale x 8 x i16>,
   iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64>* %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -2548,7 +2548,7 @@ define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i16(<vscale x
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i16(
     <vscale x 8 x i64> undef,
-    <vscale x 8 x i64>* %0,
+    ptr %0,
     <vscale x 8 x i16> %1,
     iXLen %2)
 
@@ -2557,13 +2557,13 @@ entry:
 
 declare <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16(
   <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
+  ptr,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -2572,7 +2572,7 @@ define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vsca
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
+    ptr %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)
@@ -2582,11 +2582,11 @@ entry:
 
 declare <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i16(
   <vscale x 1 x half>,
-  <vscale x 1 x half>*,
+  ptr,
   <vscale x 1 x i16>,
   iXLen);
 
-define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half>* %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -2595,7 +2595,7 @@ define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i16(<vscale x
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i16(
     <vscale x 1 x half> undef,
-    <vscale x 1 x half>* %0,
+    ptr %0,
     <vscale x 1 x i16> %1,
     iXLen %2)
 
@@ -2604,13 +2604,13 @@ entry:
 
 declare <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i16(
   <vscale x 1 x half>,
-  <vscale x 1 x half>*,
+  ptr,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -2619,7 +2619,7 @@ define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vsc
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i16(
     <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
+    ptr %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -2629,11 +2629,11 @@ entry:
 
 declare <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i16(
   <vscale x 2 x half>,
-  <vscale x 2 x half>*,
+  ptr,
   <vscale x 2 x i16>,
   iXLen);
 
-define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half>* %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -2642,7 +2642,7 @@ define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i16(<vscale x
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i16(
     <vscale x 2 x half> undef,
-    <vscale x 2 x half>* %0,
+    ptr %0,
     <vscale x 2 x i16> %1,
     iXLen %2)
 
@@ -2651,13 +2651,13 @@ entry:
 
 declare <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i16(
   <vscale x 2 x half>,
-  <vscale x 2 x half>*,
+  ptr,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -2666,7 +2666,7 @@ define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vsc
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i16(
     <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
+    ptr %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -2676,11 +2676,11 @@ entry:
 
 declare <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i16(
   <vscale x 4 x half>,
-  <vscale x 4 x half>*,
+  ptr,
   <vscale x 4 x i16>,
   iXLen);
 
-define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half>* %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -2689,7 +2689,7 @@ define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i16(<vscale x
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i16(
     <vscale x 4 x half> undef,
-    <vscale x 4 x half>* %0,
+    ptr %0,
     <vscale x 4 x i16> %1,
     iXLen %2)
 
@@ -2698,13 +2698,13 @@ entry:
 
 declare <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i16(
   <vscale x 4 x half>,
-  <vscale x 4 x half>*,
+  ptr,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -2713,7 +2713,7 @@ define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vsc
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i16(
     <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
+    ptr %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -2723,11 +2723,11 @@ entry:
 
 declare <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i16(
   <vscale x 8 x half>,
-  <vscale x 8 x half>*,
+  ptr,
   <vscale x 8 x i16>,
   iXLen);
 
-define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half>* %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -2736,7 +2736,7 @@ define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i16(<vscale x
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i16(
     <vscale x 8 x half> undef,
-    <vscale x 8 x half>* %0,
+    ptr %0,
     <vscale x 8 x i16> %1,
     iXLen %2)
 
@@ -2745,13 +2745,13 @@ entry:
 
 declare <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i16(
   <vscale x 8 x half>,
-  <vscale x 8 x half>*,
+  ptr,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -2760,7 +2760,7 @@ define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vsc
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i16(
     <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
+    ptr %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)
@@ -2770,11 +2770,11 @@ entry:
 
 declare <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i16(
   <vscale x 16 x half>,
-  <vscale x 16 x half>*,
+  ptr,
   <vscale x 16 x i16>,
   iXLen);
 
-define <vscale x 16 x half> @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half>* %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -2783,7 +2783,7 @@ define <vscale x 16 x half> @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i16(<vsca
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i16(
     <vscale x 16 x half> undef,
-    <vscale x 16 x half>* %0,
+    ptr %0,
     <vscale x 16 x i16> %1,
     iXLen %2)
 
@@ -2792,13 +2792,13 @@ entry:
 
 declare <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i16(
   <vscale x 16 x half>,
-  <vscale x 16 x half>*,
+  ptr,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 16 x half> @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -2807,7 +2807,7 @@ define <vscale x 16 x half> @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i16(
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i16(
     <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
+    ptr %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
     iXLen %4, iXLen 1)
@@ -2817,11 +2817,11 @@ entry:
 
 declare <vscale x 32 x half> @llvm.riscv.vloxei.nxv32f16.nxv32i16(
   <vscale x 32 x half>,
-  <vscale x 32 x half>*,
+  ptr,
   <vscale x 32 x i16>,
   iXLen);
 
-define <vscale x 32 x half> @intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half>* %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i16(ptr %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -2830,7 +2830,7 @@ define <vscale x 32 x half> @intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i16(<vsca
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vloxei.nxv32f16.nxv32i16(
     <vscale x 32 x half> undef,
-    <vscale x 32 x half>* %0,
+    ptr %0,
     <vscale x 32 x i16> %1,
     iXLen %2)
 
@@ -2839,13 +2839,13 @@ entry:
 
 declare <vscale x 32 x half> @llvm.riscv.vloxei.mask.nxv32f16.nxv32i16(
   <vscale x 32 x half>,
-  <vscale x 32 x half>*,
+  ptr,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 32 x half> @intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+define <vscale x 32 x half> @intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -2854,7 +2854,7 @@ define <vscale x 32 x half> @intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i16(
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vloxei.mask.nxv32f16.nxv32i16(
     <vscale x 32 x half> %0,
-    <vscale x 32 x half>* %1,
+    ptr %1,
     <vscale x 32 x i16> %2,
     <vscale x 32 x i1> %3,
     iXLen %4, iXLen 1)
@@ -2864,11 +2864,11 @@ entry:
 
 declare <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i16(
   <vscale x 1 x float>,
-  <vscale x 1 x float>*,
+  ptr,
   <vscale x 1 x i16>,
   iXLen);
 
-define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float>* %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -2878,7 +2878,7 @@ define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i16(<vscale
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i16(
     <vscale x 1 x float> undef,
-    <vscale x 1 x float>* %0,
+    ptr %0,
     <vscale x 1 x i16> %1,
     iXLen %2)
 
@@ -2887,13 +2887,13 @@ entry:
 
 declare <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16(
   <vscale x 1 x float>,
-  <vscale x 1 x float>*,
+  ptr,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -2902,7 +2902,7 @@ define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vs
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16(
     <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
+    ptr %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -2912,11 +2912,11 @@ entry:
 
 declare <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i16(
   <vscale x 2 x float>,
-  <vscale x 2 x float>*,
+  ptr,
   <vscale x 2 x i16>,
   iXLen);
 
-define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float>* %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -2926,7 +2926,7 @@ define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i16(<vscale
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i16(
     <vscale x 2 x float> undef,
-    <vscale x 2 x float>* %0,
+    ptr %0,
     <vscale x 2 x i16> %1,
     iXLen %2)
 
@@ -2935,13 +2935,13 @@ entry:
 
 declare <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16(
   <vscale x 2 x float>,
-  <vscale x 2 x float>*,
+  ptr,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -2950,7 +2950,7 @@ define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vs
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16(
     <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
+    ptr %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -2960,11 +2960,11 @@ entry:
 
 declare <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i16(
   <vscale x 4 x float>,
-  <vscale x 4 x float>*,
+  ptr,
   <vscale x 4 x i16>,
   iXLen);
 
-define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float>* %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -2974,7 +2974,7 @@ define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i16(<vscale
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i16(
     <vscale x 4 x float> undef,
-    <vscale x 4 x float>* %0,
+    ptr %0,
     <vscale x 4 x i16> %1,
     iXLen %2)
 
@@ -2983,13 +2983,13 @@ entry:
 
 declare <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16(
   <vscale x 4 x float>,
-  <vscale x 4 x float>*,
+  ptr,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -2998,7 +2998,7 @@ define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vs
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16(
     <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
+    ptr %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -3008,11 +3008,11 @@ entry:
 
 declare <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i16(
   <vscale x 8 x float>,
-  <vscale x 8 x float>*,
+  ptr,
   <vscale x 8 x i16>,
   iXLen);
 
-define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float>* %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -3022,7 +3022,7 @@ define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i16(<vscale
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i16(
     <vscale x 8 x float> undef,
-    <vscale x 8 x float>* %0,
+    ptr %0,
     <vscale x 8 x i16> %1,
     iXLen %2)
 
@@ -3031,13 +3031,13 @@ entry:
 
 declare <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16(
   <vscale x 8 x float>,
-  <vscale x 8 x float>*,
+  ptr,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -3046,7 +3046,7 @@ define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vs
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16(
     <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
+    ptr %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)
@@ -3056,11 +3056,11 @@ entry:
 
 declare <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i16(
   <vscale x 16 x float>,
-  <vscale x 16 x float>*,
+  ptr,
   <vscale x 16 x i16>,
   iXLen);
 
-define <vscale x 16 x float> @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float>* %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -3070,7 +3070,7 @@ define <vscale x 16 x float> @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i16(<vsc
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i16(
     <vscale x 16 x float> undef,
-    <vscale x 16 x float>* %0,
+    ptr %0,
     <vscale x 16 x i16> %1,
     iXLen %2)
 
@@ -3079,13 +3079,13 @@ entry:
 
 declare <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16(
   <vscale x 16 x float>,
-  <vscale x 16 x float>*,
+  ptr,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 16 x float> @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -3094,7 +3094,7 @@ define <vscale x 16 x float> @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i16
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16(
     <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
+    ptr %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
     iXLen %4, iXLen 1)
@@ -3104,11 +3104,11 @@ entry:
 
 declare <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i16(
   <vscale x 1 x double>,
-  <vscale x 1 x double>*,
+  ptr,
   <vscale x 1 x i16>,
   iXLen);
 
-define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double>* %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -3118,7 +3118,7 @@ define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i16(<vscale
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i16(
     <vscale x 1 x double> undef,
-    <vscale x 1 x double>* %0,
+    ptr %0,
     <vscale x 1 x i16> %1,
     iXLen %2)
 
@@ -3127,13 +3127,13 @@ entry:
 
 declare <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16(
   <vscale x 1 x double>,
-  <vscale x 1 x double>*,
+  ptr,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -3142,7 +3142,7 @@ define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<v
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16(
     <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
+    ptr %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -3152,11 +3152,11 @@ entry:
 
 declare <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i16(
   <vscale x 2 x double>,
-  <vscale x 2 x double>*,
+  ptr,
   <vscale x 2 x i16>,
   iXLen);
 
-define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double>* %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -3166,7 +3166,7 @@ define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i16(<vscale
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i16(
     <vscale x 2 x double> undef,
-    <vscale x 2 x double>* %0,
+    ptr %0,
     <vscale x 2 x i16> %1,
     iXLen %2)
 
@@ -3175,13 +3175,13 @@ entry:
 
 declare <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16(
   <vscale x 2 x double>,
-  <vscale x 2 x double>*,
+  ptr,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -3190,7 +3190,7 @@ define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<v
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16(
     <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
+    ptr %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -3200,11 +3200,11 @@ entry:
 
 declare <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i16(
   <vscale x 4 x double>,
-  <vscale x 4 x double>*,
+  ptr,
   <vscale x 4 x i16>,
   iXLen);
 
-define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double>* %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -3214,7 +3214,7 @@ define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i16(<vscale
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i16(
     <vscale x 4 x double> undef,
-    <vscale x 4 x double>* %0,
+    ptr %0,
     <vscale x 4 x i16> %1,
     iXLen %2)
 
@@ -3223,13 +3223,13 @@ entry:
 
 declare <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16(
   <vscale x 4 x double>,
-  <vscale x 4 x double>*,
+  ptr,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -3238,7 +3238,7 @@ define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<v
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16(
     <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
+    ptr %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -3248,11 +3248,11 @@ entry:
 
 declare <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i16(
   <vscale x 8 x double>,
-  <vscale x 8 x double>*,
+  ptr,
   <vscale x 8 x i16>,
   iXLen);
 
-define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double>* %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -3262,7 +3262,7 @@ define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i16(<vscale
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i16(
     <vscale x 8 x double> undef,
-    <vscale x 8 x double>* %0,
+    ptr %0,
     <vscale x 8 x i16> %1,
     iXLen %2)
 
@@ -3271,13 +3271,13 @@ entry:
 
 declare <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16(
   <vscale x 8 x double>,
-  <vscale x 8 x double>*,
+  ptr,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -3286,7 +3286,7 @@ define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<v
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16(
     <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
+    ptr %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)
@@ -3296,11 +3296,11 @@ entry:
 
 declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   <vscale x 1 x i8>,
   iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -3309,7 +3309,7 @@ define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i8(
     <vscale x 1 x i8> undef,
-    <vscale x 1 x i8>* %0,
+    ptr %0,
     <vscale x 1 x i8> %1,
     iXLen %2)
 
@@ -3318,13 +3318,13 @@ entry:
 
 declare <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -3333,7 +3333,7 @@ define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
+    ptr %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -3343,11 +3343,11 @@ entry:
 
 declare <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
+  ptr,
   <vscale x 2 x i8>,
   iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -3356,7 +3356,7 @@ define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i8(
     <vscale x 2 x i8> undef,
-    <vscale x 2 x i8>* %0,
+    ptr %0,
     <vscale x 2 x i8> %1,
     iXLen %2)
 
@@ -3365,13 +3365,13 @@ entry:
 
 declare <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
+  ptr,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -3380,7 +3380,7 @@ define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
+    ptr %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -3390,11 +3390,11 @@ entry:
 
 declare <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
+  ptr,
   <vscale x 4 x i8>,
   iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -3403,7 +3403,7 @@ define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i8(
     <vscale x 4 x i8> undef,
-    <vscale x 4 x i8>* %0,
+    ptr %0,
     <vscale x 4 x i8> %1,
     iXLen %2)
 
@@ -3412,13 +3412,13 @@ entry:
 
 declare <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
+  ptr,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -3427,7 +3427,7 @@ define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
+    ptr %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -3437,11 +3437,11 @@ entry:
 
 declare <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
+  ptr,
   <vscale x 8 x i8>,
   iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -3450,7 +3450,7 @@ define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i8(
     <vscale x 8 x i8> undef,
-    <vscale x 8 x i8>* %0,
+    ptr %0,
     <vscale x 8 x i8> %1,
     iXLen %2)
 
@@ -3459,13 +3459,13 @@ entry:
 
 declare <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
+  ptr,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -3474,7 +3474,7 @@ define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
+    ptr %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)
@@ -3484,11 +3484,11 @@ entry:
 
 declare <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
+  ptr,
   <vscale x 16 x i8>,
   iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8>* %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -3497,7 +3497,7 @@ define <vscale x 16 x i8> @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i8(<vscale x
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i8(
     <vscale x 16 x i8> undef,
-    <vscale x 16 x i8>* %0,
+    ptr %0,
     <vscale x 16 x i8> %1,
     iXLen %2)
 
@@ -3506,13 +3506,13 @@ entry:
 
 declare <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
+  ptr,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -3521,7 +3521,7 @@ define <vscale x 16 x i8> @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vsca
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
+    ptr %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
     iXLen %4, iXLen 1)
@@ -3531,11 +3531,11 @@ entry:
 
 declare <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
+  ptr,
   <vscale x 32 x i8>,
   iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8>* %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i8(ptr %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -3544,7 +3544,7 @@ define <vscale x 32 x i8> @intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i8(<vscale x
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i8(
     <vscale x 32 x i8> undef,
-    <vscale x 32 x i8>* %0,
+    ptr %0,
     <vscale x 32 x i8> %1,
     iXLen %2)
 
@@ -3553,13 +3553,13 @@ entry:
 
 declare <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
+  ptr,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -3568,7 +3568,7 @@ define <vscale x 32 x i8> @intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vsca
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
-    <vscale x 32 x i8>* %1,
+    ptr %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
     iXLen %4, iXLen 1)
@@ -3578,11 +3578,11 @@ entry:
 
 declare <vscale x 64 x i8> @llvm.riscv.vloxei.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
-  <vscale x 64 x i8>*,
+  ptr,
   <vscale x 64 x i8>,
   iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vloxei_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8>* %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vloxei_v_nxv64i8_nxv64i8_nxv64i8(ptr %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
@@ -3591,7 +3591,7 @@ define <vscale x 64 x i8> @intrinsic_vloxei_v_nxv64i8_nxv64i8_nxv64i8(<vscale x
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vloxei.nxv64i8.nxv64i8(
     <vscale x 64 x i8> undef,
-    <vscale x 64 x i8>* %0,
+    ptr %0,
     <vscale x 64 x i8> %1,
     iXLen %2)
 
@@ -3600,13 +3600,13 @@ entry:
 
 declare <vscale x 64 x i8> @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
-  <vscale x 64 x i8>*,
+  ptr,
   <vscale x 64 x i8>,
   <vscale x 64 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vloxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vloxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
@@ -3615,7 +3615,7 @@ define <vscale x 64 x i8> @intrinsic_vloxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vsca
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
-    <vscale x 64 x i8>* %1,
+    ptr %1,
     <vscale x 64 x i8> %2,
     <vscale x 64 x i1> %3,
     iXLen %4, iXLen 1)
@@ -3625,11 +3625,11 @@ entry:
 
 declare <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
+  ptr,
   <vscale x 1 x i8>,
   iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16>* %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -3639,7 +3639,7 @@ define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i8(
     <vscale x 1 x i16> undef,
-    <vscale x 1 x i16>* %0,
+    ptr %0,
     <vscale x 1 x i8> %1,
     iXLen %2)
 
@@ -3648,13 +3648,13 @@ entry:
 
 declare <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
+  ptr,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -3663,7 +3663,7 @@ define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscal
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
+    ptr %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -3673,11 +3673,11 @@ entry:
 
 declare <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i8(
   <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
+  ptr,
   <vscale x 2 x i8>,
   iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16>* %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -3687,7 +3687,7 @@ define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i8(
     <vscale x 2 x i16> undef,
-    <vscale x 2 x i16>* %0,
+    ptr %0,
     <vscale x 2 x i8> %1,
     iXLen %2)
 
@@ -3696,13 +3696,13 @@ entry:
 
 declare <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8(
   <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
+  ptr,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -3711,7 +3711,7 @@ define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscal
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
+    ptr %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -3721,11 +3721,11 @@ entry:
 
 declare <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i8(
   <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
+  ptr,
   <vscale x 4 x i8>,
   iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16>* %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -3735,7 +3735,7 @@ define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i8(
     <vscale x 4 x i16> undef,
-    <vscale x 4 x i16>* %0,
+    ptr %0,
     <vscale x 4 x i8> %1,
     iXLen %2)
 
@@ -3744,13 +3744,13 @@ entry:
 
 declare <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8(
   <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
+  ptr,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -3759,7 +3759,7 @@ define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscal
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
+    ptr %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -3769,11 +3769,11 @@ entry:
 
 declare <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i8(
   <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
+  ptr,
   <vscale x 8 x i8>,
   iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16>* %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -3783,7 +3783,7 @@ define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i8(
     <vscale x 8 x i16> undef,
-    <vscale x 8 x i16>* %0,
+    ptr %0,
     <vscale x 8 x i8> %1,
     iXLen %2)
 
@@ -3792,13 +3792,13 @@ entry:
 
 declare <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8(
   <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
+  ptr,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -3807,7 +3807,7 @@ define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscal
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
+    ptr %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)
@@ -3817,11 +3817,11 @@ entry:
 
 declare <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i8(
   <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
+  ptr,
   <vscale x 16 x i8>,
   iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16>* %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -3831,7 +3831,7 @@ define <vscale x 16 x i16> @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i8(<vscale
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i8(
     <vscale x 16 x i16> undef,
-    <vscale x 16 x i16>* %0,
+    ptr %0,
     <vscale x 16 x i8> %1,
     iXLen %2)
 
@@ -3840,13 +3840,13 @@ entry:
 
 declare <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8(
   <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
+  ptr,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -3855,7 +3855,7 @@ define <vscale x 16 x i16> @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<v
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
+    ptr %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
     iXLen %4, iXLen 1)
@@ -3865,11 +3865,11 @@ entry:
 
 declare <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i8(
   <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
+  ptr,
   <vscale x 32 x i8>,
   iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16>* %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i8(ptr %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -3879,7 +3879,7 @@ define <vscale x 32 x i16> @intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i8(<vscale
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i8(
     <vscale x 32 x i16> undef,
-    <vscale x 32 x i16>* %0,
+    ptr %0,
     <vscale x 32 x i8> %1,
     iXLen %2)
 
@@ -3888,13 +3888,13 @@ entry:
 
 declare <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8(
   <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
+  ptr,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -3903,7 +3903,7 @@ define <vscale x 32 x i16> @intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<v
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
-    <vscale x 32 x i16>* %1,
+    ptr %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
     iXLen %4, iXLen 1)
@@ -3913,11 +3913,11 @@ entry:
 
 declare <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i8(
   <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
+  ptr,
   <vscale x 1 x i8>,
   iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32>* %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -3927,7 +3927,7 @@ define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i8(
     <vscale x 1 x i32> undef,
-    <vscale x 1 x i32>* %0,
+    ptr %0,
     <vscale x 1 x i8> %1,
     iXLen %2)
 
@@ -3936,13 +3936,13 @@ entry:
 
 declare <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8(
   <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
+  ptr,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -3951,7 +3951,7 @@ define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscal
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
+    ptr %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -3961,11 +3961,11 @@ entry:
 
 declare <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i8(
   <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
+  ptr,
   <vscale x 2 x i8>,
   iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32>* %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -3975,7 +3975,7 @@ define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i8(
     <vscale x 2 x i32> undef,
-    <vscale x 2 x i32>* %0,
+    ptr %0,
     <vscale x 2 x i8> %1,
     iXLen %2)
 
@@ -3984,13 +3984,13 @@ entry:
 
 declare <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8(
   <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
+  ptr,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -3999,7 +3999,7 @@ define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscal
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
+    ptr %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -4009,11 +4009,11 @@ entry:
 
 declare <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i8(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   <vscale x 4 x i8>,
   iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32>* %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -4023,7 +4023,7 @@ define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i8(
     <vscale x 4 x i32> undef,
-    <vscale x 4 x i32>* %0,
+    ptr %0,
     <vscale x 4 x i8> %1,
     iXLen %2)
 
@@ -4032,13 +4032,13 @@ entry:
 
 declare <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -4047,7 +4047,7 @@ define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscal
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
+    ptr %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -4057,11 +4057,11 @@ entry:
 
 declare <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i8(
   <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
+  ptr,
   <vscale x 8 x i8>,
   iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32>* %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -4071,7 +4071,7 @@ define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i8(
     <vscale x 8 x i32> undef,
-    <vscale x 8 x i32>* %0,
+    ptr %0,
     <vscale x 8 x i8> %1,
     iXLen %2)
 
@@ -4080,13 +4080,13 @@ entry:
 
 declare <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8(
   <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
+  ptr,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -4095,7 +4095,7 @@ define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscal
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
+    ptr %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)
@@ -4105,11 +4105,11 @@ entry:
 
 declare <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i8(
   <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
+  ptr,
   <vscale x 16 x i8>,
   iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32>* %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -4119,7 +4119,7 @@ define <vscale x 16 x i32> @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i8(<vscale
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i8(
     <vscale x 16 x i32> undef,
-    <vscale x 16 x i32>* %0,
+    ptr %0,
     <vscale x 16 x i8> %1,
     iXLen %2)
 
@@ -4128,13 +4128,13 @@ entry:
 
 declare <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8(
   <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
+  ptr,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -4143,7 +4143,7 @@ define <vscale x 16 x i32> @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<v
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
+    ptr %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
     iXLen %4, iXLen 1)
@@ -4153,11 +4153,11 @@ entry:
 
 declare <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i8(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   <vscale x 1 x i8>,
   iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64>* %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -4167,7 +4167,7 @@ define <vscale x 1 x i64> @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i8(
     <vscale x 1 x i64> undef,
-    <vscale x 1 x i64>* %0,
+    ptr %0,
     <vscale x 1 x i8> %1,
     iXLen %2)
 
@@ -4176,13 +4176,13 @@ entry:
 
 declare <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -4191,7 +4191,7 @@ define <vscale x 1 x i64> @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscal
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
+    ptr %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -4201,11 +4201,11 @@ entry:
 
 declare <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i8(
   <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
+  ptr,
   <vscale x 2 x i8>,
   iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64>* %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -4215,7 +4215,7 @@ define <vscale x 2 x i64> @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i8(
     <vscale x 2 x i64> undef,
-    <vscale x 2 x i64>* %0,
+    ptr %0,
     <vscale x 2 x i8> %1,
     iXLen %2)
 
@@ -4224,13 +4224,13 @@ entry:
 
 declare <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8(
   <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
+  ptr,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -4239,7 +4239,7 @@ define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscal
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
+    ptr %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -4249,11 +4249,11 @@ entry:
 
 declare <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i8(
   <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
+  ptr,
   <vscale x 4 x i8>,
   iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64>* %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -4263,7 +4263,7 @@ define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i8(
     <vscale x 4 x i64> undef,
-    <vscale x 4 x i64>* %0,
+    ptr %0,
     <vscale x 4 x i8> %1,
     iXLen %2)
 
@@ -4272,13 +4272,13 @@ entry:
 
 declare <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8(
   <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
+  ptr,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -4287,7 +4287,7 @@ define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscal
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
+    ptr %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -4297,11 +4297,11 @@ entry:
 
 declare <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i8(
   <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
+  ptr,
   <vscale x 8 x i8>,
   iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64>* %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -4311,7 +4311,7 @@ define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i8(
     <vscale x 8 x i64> undef,
-    <vscale x 8 x i64>* %0,
+    ptr %0,
     <vscale x 8 x i8> %1,
     iXLen %2)
 
@@ -4320,13 +4320,13 @@ entry:
 
 declare <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8(
   <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
+  ptr,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -4335,7 +4335,7 @@ define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscal
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
+    ptr %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)
@@ -4345,11 +4345,11 @@ entry:
 
 declare <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i8(
   <vscale x 1 x half>,
-  <vscale x 1 x half>*,
+  ptr,
   <vscale x 1 x i8>,
   iXLen);
 
-define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half>* %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -4359,7 +4359,7 @@ define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i8(<vscale x
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i8(
     <vscale x 1 x half> undef,
-    <vscale x 1 x half>* %0,
+    ptr %0,
     <vscale x 1 x i8> %1,
     iXLen %2)
 
@@ -4368,13 +4368,13 @@ entry:
 
 declare <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i8(
   <vscale x 1 x half>,
-  <vscale x 1 x half>*,
+  ptr,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -4383,7 +4383,7 @@ define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vsca
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i8(
     <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
+    ptr %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -4393,11 +4393,11 @@ entry:
 
 declare <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i8(
   <vscale x 2 x half>,
-  <vscale x 2 x half>*,
+  ptr,
   <vscale x 2 x i8>,
   iXLen);
 
-define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half>* %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -4407,7 +4407,7 @@ define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i8(<vscale x
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i8(
     <vscale x 2 x half> undef,
-    <vscale x 2 x half>* %0,
+    ptr %0,
     <vscale x 2 x i8> %1,
     iXLen %2)
 
@@ -4416,13 +4416,13 @@ entry:
 
 declare <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i8(
   <vscale x 2 x half>,
-  <vscale x 2 x half>*,
+  ptr,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -4431,7 +4431,7 @@ define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vsca
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i8(
     <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
+    ptr %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -4441,11 +4441,11 @@ entry:
 
 declare <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i8(
   <vscale x 4 x half>,
-  <vscale x 4 x half>*,
+  ptr,
   <vscale x 4 x i8>,
   iXLen);
 
-define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half>* %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -4455,7 +4455,7 @@ define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i8(<vscale x
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i8(
     <vscale x 4 x half> undef,
-    <vscale x 4 x half>* %0,
+    ptr %0,
     <vscale x 4 x i8> %1,
     iXLen %2)
 
@@ -4464,13 +4464,13 @@ entry:
 
 declare <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i8(
   <vscale x 4 x half>,
-  <vscale x 4 x half>*,
+  ptr,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -4479,7 +4479,7 @@ define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vsca
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i8(
     <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
+    ptr %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -4489,11 +4489,11 @@ entry:
 
 declare <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i8(
   <vscale x 8 x half>,
-  <vscale x 8 x half>*,
+  ptr,
   <vscale x 8 x i8>,
   iXLen);
 
-define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half>* %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -4503,7 +4503,7 @@ define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i8(<vscale x
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i8(
     <vscale x 8 x half> undef,
-    <vscale x 8 x half>* %0,
+    ptr %0,
     <vscale x 8 x i8> %1,
     iXLen %2)
 
@@ -4512,13 +4512,13 @@ entry:
 
 declare <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i8(
   <vscale x 8 x half>,
-  <vscale x 8 x half>*,
+  ptr,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -4527,7 +4527,7 @@ define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vsca
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i8(
     <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
+    ptr %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)
@@ -4537,11 +4537,11 @@ entry:
 
 declare <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i8(
   <vscale x 16 x half>,
-  <vscale x 16 x half>*,
+  ptr,
   <vscale x 16 x i8>,
   iXLen);
 
-define <vscale x 16 x half> @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half>* %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -4551,7 +4551,7 @@ define <vscale x 16 x half> @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i8(<vscal
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i8(
     <vscale x 16 x half> undef,
-    <vscale x 16 x half>* %0,
+    ptr %0,
     <vscale x 16 x i8> %1,
     iXLen %2)
 
@@ -4560,13 +4560,13 @@ entry:
 
 declare <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i8(
   <vscale x 16 x half>,
-  <vscale x 16 x half>*,
+  ptr,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 16 x half> @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -4575,7 +4575,7 @@ define <vscale x 16 x half> @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i8(
     <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
+    ptr %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
     iXLen %4, iXLen 1)
@@ -4585,11 +4585,11 @@ entry:
 
 declare <vscale x 32 x half> @llvm.riscv.vloxei.nxv32f16.nxv32i8(
   <vscale x 32 x half>,
-  <vscale x 32 x half>*,
+  ptr,
   <vscale x 32 x i8>,
   iXLen);
 
-define <vscale x 32 x half> @intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half>* %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i8(ptr %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -4599,7 +4599,7 @@ define <vscale x 32 x half> @intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i8(<vscal
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vloxei.nxv32f16.nxv32i8(
     <vscale x 32 x half> undef,
-    <vscale x 32 x half>* %0,
+    ptr %0,
     <vscale x 32 x i8> %1,
     iXLen %2)
 
@@ -4608,13 +4608,13 @@ entry:
 
 declare <vscale x 32 x half> @llvm.riscv.vloxei.mask.nxv32f16.nxv32i8(
   <vscale x 32 x half>,
-  <vscale x 32 x half>*,
+  ptr,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 32 x half> @intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+define <vscale x 32 x half> @intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -4623,7 +4623,7 @@ define <vscale x 32 x half> @intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vloxei.mask.nxv32f16.nxv32i8(
     <vscale x 32 x half> %0,
-    <vscale x 32 x half>* %1,
+    ptr %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
     iXLen %4, iXLen 1)
@@ -4633,11 +4633,11 @@ entry:
 
 declare <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i8(
   <vscale x 1 x float>,
-  <vscale x 1 x float>*,
+  ptr,
   <vscale x 1 x i8>,
   iXLen);
 
-define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float>* %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -4647,7 +4647,7 @@ define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i8(<vscale x
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i8(
     <vscale x 1 x float> undef,
-    <vscale x 1 x float>* %0,
+    ptr %0,
     <vscale x 1 x i8> %1,
     iXLen %2)
 
@@ -4656,13 +4656,13 @@ entry:
 
 declare <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8(
   <vscale x 1 x float>,
-  <vscale x 1 x float>*,
+  ptr,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -4671,7 +4671,7 @@ define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vsc
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8(
     <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
+    ptr %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -4681,11 +4681,11 @@ entry:
 
 declare <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i8(
   <vscale x 2 x float>,
-  <vscale x 2 x float>*,
+  ptr,
   <vscale x 2 x i8>,
   iXLen);
 
-define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float>* %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -4695,7 +4695,7 @@ define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i8(<vscale x
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i8(
     <vscale x 2 x float> undef,
-    <vscale x 2 x float>* %0,
+    ptr %0,
     <vscale x 2 x i8> %1,
     iXLen %2)
 
@@ -4704,13 +4704,13 @@ entry:
 
 declare <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8(
   <vscale x 2 x float>,
-  <vscale x 2 x float>*,
+  ptr,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -4719,7 +4719,7 @@ define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vsc
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8(
     <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
+    ptr %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -4729,11 +4729,11 @@ entry:
 
 declare <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i8(
   <vscale x 4 x float>,
-  <vscale x 4 x float>*,
+  ptr,
   <vscale x 4 x i8>,
   iXLen);
 
-define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float>* %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -4743,7 +4743,7 @@ define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i8(<vscale x
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i8(
     <vscale x 4 x float> undef,
-    <vscale x 4 x float>* %0,
+    ptr %0,
     <vscale x 4 x i8> %1,
     iXLen %2)
 
@@ -4752,13 +4752,13 @@ entry:
 
 declare <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8(
   <vscale x 4 x float>,
-  <vscale x 4 x float>*,
+  ptr,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -4767,7 +4767,7 @@ define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vsc
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8(
     <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
+    ptr %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -4777,11 +4777,11 @@ entry:
 
 declare <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i8(
   <vscale x 8 x float>,
-  <vscale x 8 x float>*,
+  ptr,
   <vscale x 8 x i8>,
   iXLen);
 
-define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float>* %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -4791,7 +4791,7 @@ define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i8(<vscale x
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i8(
     <vscale x 8 x float> undef,
-    <vscale x 8 x float>* %0,
+    ptr %0,
     <vscale x 8 x i8> %1,
     iXLen %2)
 
@@ -4800,13 +4800,13 @@ entry:
 
 declare <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8(
   <vscale x 8 x float>,
-  <vscale x 8 x float>*,
+  ptr,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -4815,7 +4815,7 @@ define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vsc
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8(
     <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
+    ptr %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)
@@ -4825,11 +4825,11 @@ entry:
 
 declare <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i8(
   <vscale x 16 x float>,
-  <vscale x 16 x float>*,
+  ptr,
   <vscale x 16 x i8>,
   iXLen);
 
-define <vscale x 16 x float> @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float>* %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -4839,7 +4839,7 @@ define <vscale x 16 x float> @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i8(<vsca
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i8(
     <vscale x 16 x float> undef,
-    <vscale x 16 x float>* %0,
+    ptr %0,
     <vscale x 16 x i8> %1,
     iXLen %2)
 
@@ -4848,13 +4848,13 @@ entry:
 
 declare <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8(
   <vscale x 16 x float>,
-  <vscale x 16 x float>*,
+  ptr,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 16 x float> @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -4863,7 +4863,7 @@ define <vscale x 16 x float> @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i8(
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8(
     <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
+    ptr %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
     iXLen %4, iXLen 1)
@@ -4873,11 +4873,11 @@ entry:
 
 declare <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i8(
   <vscale x 1 x double>,
-  <vscale x 1 x double>*,
+  ptr,
   <vscale x 1 x i8>,
   iXLen);
 
-define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double>* %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -4887,7 +4887,7 @@ define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i8(<vscale
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i8(
     <vscale x 1 x double> undef,
-    <vscale x 1 x double>* %0,
+    ptr %0,
     <vscale x 1 x i8> %1,
     iXLen %2)
 
@@ -4896,13 +4896,13 @@ entry:
 
 declare <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8(
   <vscale x 1 x double>,
-  <vscale x 1 x double>*,
+  ptr,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -4911,7 +4911,7 @@ define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vs
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8(
     <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
+    ptr %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -4921,11 +4921,11 @@ entry:
 
 declare <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i8(
   <vscale x 2 x double>,
-  <vscale x 2 x double>*,
+  ptr,
   <vscale x 2 x i8>,
   iXLen);
 
-define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double>* %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -4935,7 +4935,7 @@ define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i8(<vscale
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i8(
     <vscale x 2 x double> undef,
-    <vscale x 2 x double>* %0,
+    ptr %0,
     <vscale x 2 x i8> %1,
     iXLen %2)
 
@@ -4944,13 +4944,13 @@ entry:
 
 declare <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8(
   <vscale x 2 x double>,
-  <vscale x 2 x double>*,
+  ptr,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -4959,7 +4959,7 @@ define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vs
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8(
     <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
+    ptr %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -4969,11 +4969,11 @@ entry:
 
 declare <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i8(
   <vscale x 4 x double>,
-  <vscale x 4 x double>*,
+  ptr,
   <vscale x 4 x i8>,
   iXLen);
 
-define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double>* %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -4983,7 +4983,7 @@ define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i8(<vscale
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i8(
     <vscale x 4 x double> undef,
-    <vscale x 4 x double>* %0,
+    ptr %0,
     <vscale x 4 x i8> %1,
     iXLen %2)
 
@@ -4992,13 +4992,13 @@ entry:
 
 declare <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8(
   <vscale x 4 x double>,
-  <vscale x 4 x double>*,
+  ptr,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -5007,7 +5007,7 @@ define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vs
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8(
     <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
+    ptr %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -5017,11 +5017,11 @@ entry:
 
 declare <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i8(
   <vscale x 8 x double>,
-  <vscale x 8 x double>*,
+  ptr,
   <vscale x 8 x i8>,
   iXLen);
 
-define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double>* %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -5031,7 +5031,7 @@ define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i8(<vscale
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i8(
     <vscale x 8 x double> undef,
-    <vscale x 8 x double>* %0,
+    ptr %0,
     <vscale x 8 x i8> %1,
     iXLen %2)
 
@@ -5040,13 +5040,13 @@ entry:
 
 declare <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8(
   <vscale x 8 x double>,
-  <vscale x 8 x double>*,
+  ptr,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -5055,7 +5055,7 @@ define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vs
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8(
     <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
+    ptr %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vlse.ll b/llvm/test/CodeGen/RISCV/rvv/vlse.ll
index 5e4576ec07ce6..3b191a7f8bb80 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vlse.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vlse.ll
@@ -6,11 +6,11 @@
 
 declare <vscale x 1 x i64> @llvm.riscv.vlse.nxv1i64(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   iXLen,
   iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vlse_v_nxv1i64_nxv1i64(<vscale x 1 x i64>* %0, iXLen %1, iXLen %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vlse_v_nxv1i64_nxv1i64(ptr %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
@@ -19,7 +19,7 @@ define <vscale x 1 x i64> @intrinsic_vlse_v_nxv1i64_nxv1i64(<vscale x 1 x i64>*
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vlse.nxv1i64(
     <vscale x 1 x i64> undef,
-    <vscale x 1 x i64>* %0,
+    ptr %0,
     iXLen %1,
     iXLen %2)
 
@@ -28,13 +28,13 @@ entry:
 
 declare <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   iXLen,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vlse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vlse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
@@ -43,7 +43,7 @@ define <vscale x 1 x i64> @intrinsic_vlse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64(
     <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -53,11 +53,11 @@ entry:
 
 declare <vscale x 2 x i64> @llvm.riscv.vlse.nxv2i64(
   <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
+  ptr,
   iXLen,
   iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vlse_v_nxv2i64_nxv2i64(<vscale x 2 x i64>* %0, iXLen %1, iXLen %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vlse_v_nxv2i64_nxv2i64(ptr %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
@@ -66,7 +66,7 @@ define <vscale x 2 x i64> @intrinsic_vlse_v_nxv2i64_nxv2i64(<vscale x 2 x i64>*
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vlse.nxv2i64(
     <vscale x 2 x i64> undef,
-    <vscale x 2 x i64>* %0,
+    ptr %0,
     iXLen %1,
     iXLen %2)
 
@@ -75,13 +75,13 @@ entry:
 
 declare <vscale x 2 x i64> @llvm.riscv.vlse.mask.nxv2i64(
   <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
+  ptr,
   iXLen,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vlse_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vlse_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
@@ -90,7 +90,7 @@ define <vscale x 2 x i64> @intrinsic_vlse_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vlse.mask.nxv2i64(
     <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -100,11 +100,11 @@ entry:
 
 declare <vscale x 4 x i64> @llvm.riscv.vlse.nxv4i64(
   <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
+  ptr,
   iXLen,
   iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vlse_v_nxv4i64_nxv4i64(<vscale x 4 x i64>* %0, iXLen %1, iXLen %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vlse_v_nxv4i64_nxv4i64(ptr %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
@@ -113,7 +113,7 @@ define <vscale x 4 x i64> @intrinsic_vlse_v_nxv4i64_nxv4i64(<vscale x 4 x i64>*
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vlse.nxv4i64(
     <vscale x 4 x i64> undef,
-    <vscale x 4 x i64>* %0,
+    ptr %0,
     iXLen %1,
     iXLen %2)
 
@@ -122,13 +122,13 @@ entry:
 
 declare <vscale x 4 x i64> @llvm.riscv.vlse.mask.nxv4i64(
   <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
+  ptr,
   iXLen,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vlse_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vlse_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
@@ -137,7 +137,7 @@ define <vscale x 4 x i64> @intrinsic_vlse_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vlse.mask.nxv4i64(
     <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -147,11 +147,11 @@ entry:
 
 declare <vscale x 8 x i64> @llvm.riscv.vlse.nxv8i64(
   <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
+  ptr,
   iXLen,
   iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vlse_v_nxv8i64_nxv8i64(<vscale x 8 x i64>* %0, iXLen %1, iXLen %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vlse_v_nxv8i64_nxv8i64(ptr %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
@@ -160,7 +160,7 @@ define <vscale x 8 x i64> @intrinsic_vlse_v_nxv8i64_nxv8i64(<vscale x 8 x i64>*
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vlse.nxv8i64(
     <vscale x 8 x i64> undef,
-    <vscale x 8 x i64>* %0,
+    ptr %0,
     iXLen %1,
     iXLen %2)
 
@@ -169,13 +169,13 @@ entry:
 
 declare <vscale x 8 x i64> @llvm.riscv.vlse.mask.nxv8i64(
   <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
+  ptr,
   iXLen,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vlse_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vlse_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
@@ -184,7 +184,7 @@ define <vscale x 8 x i64> @intrinsic_vlse_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vlse.mask.nxv8i64(
     <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)
@@ -194,11 +194,11 @@ entry:
 
 declare <vscale x 1 x double> @llvm.riscv.vlse.nxv1f64(
   <vscale x 1 x double>,
-  <vscale x 1 x double>*,
+  ptr,
   iXLen,
   iXLen);
 
-define <vscale x 1 x double> @intrinsic_vlse_v_nxv1f64_nxv1f64(<vscale x 1 x double>* %0, iXLen %1, iXLen %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vlse_v_nxv1f64_nxv1f64(ptr %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
@@ -207,7 +207,7 @@ define <vscale x 1 x double> @intrinsic_vlse_v_nxv1f64_nxv1f64(<vscale x 1 x dou
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vlse.nxv1f64(
     <vscale x 1 x double> undef,
-    <vscale x 1 x double>* %0,
+    ptr %0,
     iXLen %1,
     iXLen %2)
 
@@ -216,13 +216,13 @@ entry:
 
 declare <vscale x 1 x double> @llvm.riscv.vlse.mask.nxv1f64(
   <vscale x 1 x double>,
-  <vscale x 1 x double>*,
+  ptr,
   iXLen,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x double> @intrinsic_vlse_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vlse_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
@@ -231,7 +231,7 @@ define <vscale x 1 x double> @intrinsic_vlse_mask_v_nxv1f64_nxv1f64(<vscale x 1
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vlse.mask.nxv1f64(
     <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -241,11 +241,11 @@ entry:
 
 declare <vscale x 2 x double> @llvm.riscv.vlse.nxv2f64(
   <vscale x 2 x double>,
-  <vscale x 2 x double>*,
+  ptr,
   iXLen,
   iXLen);
 
-define <vscale x 2 x double> @intrinsic_vlse_v_nxv2f64_nxv2f64(<vscale x 2 x double>* %0, iXLen %1, iXLen %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vlse_v_nxv2f64_nxv2f64(ptr %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
@@ -254,7 +254,7 @@ define <vscale x 2 x double> @intrinsic_vlse_v_nxv2f64_nxv2f64(<vscale x 2 x dou
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vlse.nxv2f64(
     <vscale x 2 x double> undef,
-    <vscale x 2 x double>* %0,
+    ptr %0,
     iXLen %1,
     iXLen %2)
 
@@ -263,13 +263,13 @@ entry:
 
 declare <vscale x 2 x double> @llvm.riscv.vlse.mask.nxv2f64(
   <vscale x 2 x double>,
-  <vscale x 2 x double>*,
+  ptr,
   iXLen,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x double> @intrinsic_vlse_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vlse_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
@@ -278,7 +278,7 @@ define <vscale x 2 x double> @intrinsic_vlse_mask_v_nxv2f64_nxv2f64(<vscale x 2
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vlse.mask.nxv2f64(
     <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -288,11 +288,11 @@ entry:
 
 declare <vscale x 4 x double> @llvm.riscv.vlse.nxv4f64(
   <vscale x 4 x double>,
-  <vscale x 4 x double>*,
+  ptr,
   iXLen,
   iXLen);
 
-define <vscale x 4 x double> @intrinsic_vlse_v_nxv4f64_nxv4f64(<vscale x 4 x double>* %0, iXLen %1, iXLen %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vlse_v_nxv4f64_nxv4f64(ptr %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
@@ -301,7 +301,7 @@ define <vscale x 4 x double> @intrinsic_vlse_v_nxv4f64_nxv4f64(<vscale x 4 x dou
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vlse.nxv4f64(
     <vscale x 4 x double> undef,
-    <vscale x 4 x double>* %0,
+    ptr %0,
     iXLen %1,
     iXLen %2)
 
@@ -310,13 +310,13 @@ entry:
 
 declare <vscale x 4 x double> @llvm.riscv.vlse.mask.nxv4f64(
   <vscale x 4 x double>,
-  <vscale x 4 x double>*,
+  ptr,
   iXLen,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x double> @intrinsic_vlse_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vlse_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
@@ -325,7 +325,7 @@ define <vscale x 4 x double> @intrinsic_vlse_mask_v_nxv4f64_nxv4f64(<vscale x 4
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vlse.mask.nxv4f64(
     <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -335,11 +335,11 @@ entry:
 
 declare <vscale x 8 x double> @llvm.riscv.vlse.nxv8f64(
   <vscale x 8 x double>,
-  <vscale x 8 x double>*,
+  ptr,
   iXLen,
   iXLen);
 
-define <vscale x 8 x double> @intrinsic_vlse_v_nxv8f64_nxv8f64(<vscale x 8 x double>* %0, iXLen %1, iXLen %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vlse_v_nxv8f64_nxv8f64(ptr %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
@@ -348,7 +348,7 @@ define <vscale x 8 x double> @intrinsic_vlse_v_nxv8f64_nxv8f64(<vscale x 8 x dou
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vlse.nxv8f64(
     <vscale x 8 x double> undef,
-    <vscale x 8 x double>* %0,
+    ptr %0,
     iXLen %1,
     iXLen %2)
 
@@ -357,13 +357,13 @@ entry:
 
 declare <vscale x 8 x double> @llvm.riscv.vlse.mask.nxv8f64(
   <vscale x 8 x double>,
-  <vscale x 8 x double>*,
+  ptr,
   iXLen,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x double> @intrinsic_vlse_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vlse_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
@@ -372,7 +372,7 @@ define <vscale x 8 x double> @intrinsic_vlse_mask_v_nxv8f64_nxv8f64(<vscale x 8
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vlse.mask.nxv8f64(
     <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)
@@ -382,11 +382,11 @@ entry:
 
 declare <vscale x 1 x i32> @llvm.riscv.vlse.nxv1i32(
   <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
+  ptr,
   iXLen,
   iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vlse_v_nxv1i32_nxv1i32(<vscale x 1 x i32>* %0, iXLen %1, iXLen %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vlse_v_nxv1i32_nxv1i32(ptr %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
@@ -395,7 +395,7 @@ define <vscale x 1 x i32> @intrinsic_vlse_v_nxv1i32_nxv1i32(<vscale x 1 x i32>*
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vlse.nxv1i32(
     <vscale x 1 x i32> undef,
-    <vscale x 1 x i32>* %0,
+    ptr %0,
     iXLen %1,
     iXLen %2)
 
@@ -404,13 +404,13 @@ entry:
 
 declare <vscale x 1 x i32> @llvm.riscv.vlse.mask.nxv1i32(
   <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
+  ptr,
   iXLen,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vlse_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vlse_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
@@ -419,7 +419,7 @@ define <vscale x 1 x i32> @intrinsic_vlse_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vlse.mask.nxv1i32(
     <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -429,11 +429,11 @@ entry:
 
 declare <vscale x 2 x i32> @llvm.riscv.vlse.nxv2i32(
   <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
+  ptr,
   iXLen,
   iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vlse_v_nxv2i32_nxv2i32(<vscale x 2 x i32>* %0, iXLen %1, iXLen %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vlse_v_nxv2i32_nxv2i32(ptr %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
@@ -442,7 +442,7 @@ define <vscale x 2 x i32> @intrinsic_vlse_v_nxv2i32_nxv2i32(<vscale x 2 x i32>*
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vlse.nxv2i32(
     <vscale x 2 x i32> undef,
-    <vscale x 2 x i32>* %0,
+    ptr %0,
     iXLen %1,
     iXLen %2)
 
@@ -451,13 +451,13 @@ entry:
 
 declare <vscale x 2 x i32> @llvm.riscv.vlse.mask.nxv2i32(
   <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
+  ptr,
   iXLen,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vlse_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vlse_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
@@ -466,7 +466,7 @@ define <vscale x 2 x i32> @intrinsic_vlse_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vlse.mask.nxv2i32(
     <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -476,11 +476,11 @@ entry:
 
 declare <vscale x 4 x i32> @llvm.riscv.vlse.nxv4i32(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   iXLen,
   iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vlse_v_nxv4i32_nxv4i32(<vscale x 4 x i32>* %0, iXLen %1, iXLen %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vlse_v_nxv4i32_nxv4i32(ptr %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
@@ -489,7 +489,7 @@ define <vscale x 4 x i32> @intrinsic_vlse_v_nxv4i32_nxv4i32(<vscale x 4 x i32>*
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vlse.nxv4i32(
     <vscale x 4 x i32> undef,
-    <vscale x 4 x i32>* %0,
+    ptr %0,
     iXLen %1,
     iXLen %2)
 
@@ -498,13 +498,13 @@ entry:
 
 declare <vscale x 4 x i32> @llvm.riscv.vlse.mask.nxv4i32(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   iXLen,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vlse_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vlse_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
@@ -513,7 +513,7 @@ define <vscale x 4 x i32> @intrinsic_vlse_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vlse.mask.nxv4i32(
     <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -523,11 +523,11 @@ entry:
 
 declare <vscale x 8 x i32> @llvm.riscv.vlse.nxv8i32(
   <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
+  ptr,
   iXLen,
   iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vlse_v_nxv8i32_nxv8i32(<vscale x 8 x i32>* %0, iXLen %1, iXLen %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vlse_v_nxv8i32_nxv8i32(ptr %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
@@ -536,7 +536,7 @@ define <vscale x 8 x i32> @intrinsic_vlse_v_nxv8i32_nxv8i32(<vscale x 8 x i32>*
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vlse.nxv8i32(
     <vscale x 8 x i32> undef,
-    <vscale x 8 x i32>* %0,
+    ptr %0,
     iXLen %1,
     iXLen %2)
 
@@ -545,13 +545,13 @@ entry:
 
 declare <vscale x 8 x i32> @llvm.riscv.vlse.mask.nxv8i32(
   <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
+  ptr,
   iXLen,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vlse_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vlse_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
@@ -560,7 +560,7 @@ define <vscale x 8 x i32> @intrinsic_vlse_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vlse.mask.nxv8i32(
     <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)
@@ -570,11 +570,11 @@ entry:
 
 declare <vscale x 16 x i32> @llvm.riscv.vlse.nxv16i32(
   <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
+  ptr,
   iXLen,
   iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vlse_v_nxv16i32_nxv16i32(<vscale x 16 x i32>* %0, iXLen %1, iXLen %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vlse_v_nxv16i32_nxv16i32(ptr %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
@@ -583,7 +583,7 @@ define <vscale x 16 x i32> @intrinsic_vlse_v_nxv16i32_nxv16i32(<vscale x 16 x i3
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vlse.nxv16i32(
     <vscale x 16 x i32> undef,
-    <vscale x 16 x i32>* %0,
+    ptr %0,
     iXLen %1,
     iXLen %2)
 
@@ -592,13 +592,13 @@ entry:
 
 declare <vscale x 16 x i32> @llvm.riscv.vlse.mask.nxv16i32(
   <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
+  ptr,
   iXLen,
   <vscale x 16 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vlse_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vlse_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, ptr %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
@@ -607,7 +607,7 @@ define <vscale x 16 x i32> @intrinsic_vlse_mask_v_nxv16i32_nxv16i32(<vscale x 16
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vlse.mask.nxv16i32(
     <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 16 x i1> %3,
     iXLen %4, iXLen 1)
@@ -617,11 +617,11 @@ entry:
 
 declare <vscale x 1 x float> @llvm.riscv.vlse.nxv1f32(
   <vscale x 1 x float>,
-  <vscale x 1 x float>*,
+  ptr,
   iXLen,
   iXLen);
 
-define <vscale x 1 x float> @intrinsic_vlse_v_nxv1f32_nxv1f32(<vscale x 1 x float>* %0, iXLen %1, iXLen %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vlse_v_nxv1f32_nxv1f32(ptr %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
@@ -630,7 +630,7 @@ define <vscale x 1 x float> @intrinsic_vlse_v_nxv1f32_nxv1f32(<vscale x 1 x floa
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vlse.nxv1f32(
     <vscale x 1 x float> undef,
-    <vscale x 1 x float>* %0,
+    ptr %0,
     iXLen %1,
     iXLen %2)
 
@@ -639,13 +639,13 @@ entry:
 
 declare <vscale x 1 x float> @llvm.riscv.vlse.mask.nxv1f32(
   <vscale x 1 x float>,
-  <vscale x 1 x float>*,
+  ptr,
   iXLen,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x float> @intrinsic_vlse_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vlse_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
@@ -654,7 +654,7 @@ define <vscale x 1 x float> @intrinsic_vlse_mask_v_nxv1f32_nxv1f32(<vscale x 1 x
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vlse.mask.nxv1f32(
     <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -664,11 +664,11 @@ entry:
 
 declare <vscale x 2 x float> @llvm.riscv.vlse.nxv2f32(
   <vscale x 2 x float>,
-  <vscale x 2 x float>*,
+  ptr,
   iXLen,
   iXLen);
 
-define <vscale x 2 x float> @intrinsic_vlse_v_nxv2f32_nxv2f32(<vscale x 2 x float>* %0, iXLen %1, iXLen %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vlse_v_nxv2f32_nxv2f32(ptr %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
@@ -677,7 +677,7 @@ define <vscale x 2 x float> @intrinsic_vlse_v_nxv2f32_nxv2f32(<vscale x 2 x floa
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vlse.nxv2f32(
     <vscale x 2 x float> undef,
-    <vscale x 2 x float>* %0,
+    ptr %0,
     iXLen %1,
     iXLen %2)
 
@@ -686,13 +686,13 @@ entry:
 
 declare <vscale x 2 x float> @llvm.riscv.vlse.mask.nxv2f32(
   <vscale x 2 x float>,
-  <vscale x 2 x float>*,
+  ptr,
   iXLen,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x float> @intrinsic_vlse_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vlse_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
@@ -701,7 +701,7 @@ define <vscale x 2 x float> @intrinsic_vlse_mask_v_nxv2f32_nxv2f32(<vscale x 2 x
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vlse.mask.nxv2f32(
     <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -711,11 +711,11 @@ entry:
 
 declare <vscale x 4 x float> @llvm.riscv.vlse.nxv4f32(
   <vscale x 4 x float>,
-  <vscale x 4 x float>*,
+  ptr,
   iXLen,
   iXLen);
 
-define <vscale x 4 x float> @intrinsic_vlse_v_nxv4f32_nxv4f32(<vscale x 4 x float>* %0, iXLen %1, iXLen %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vlse_v_nxv4f32_nxv4f32(ptr %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
@@ -724,7 +724,7 @@ define <vscale x 4 x float> @intrinsic_vlse_v_nxv4f32_nxv4f32(<vscale x 4 x floa
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vlse.nxv4f32(
     <vscale x 4 x float> undef,
-    <vscale x 4 x float>* %0,
+    ptr %0,
     iXLen %1,
     iXLen %2)
 
@@ -733,13 +733,13 @@ entry:
 
 declare <vscale x 4 x float> @llvm.riscv.vlse.mask.nxv4f32(
   <vscale x 4 x float>,
-  <vscale x 4 x float>*,
+  ptr,
   iXLen,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x float> @intrinsic_vlse_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vlse_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
@@ -748,7 +748,7 @@ define <vscale x 4 x float> @intrinsic_vlse_mask_v_nxv4f32_nxv4f32(<vscale x 4 x
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vlse.mask.nxv4f32(
     <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -758,11 +758,11 @@ entry:
 
 declare <vscale x 8 x float> @llvm.riscv.vlse.nxv8f32(
   <vscale x 8 x float>,
-  <vscale x 8 x float>*,
+  ptr,
   iXLen,
   iXLen);
 
-define <vscale x 8 x float> @intrinsic_vlse_v_nxv8f32_nxv8f32(<vscale x 8 x float>* %0, iXLen %1, iXLen %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vlse_v_nxv8f32_nxv8f32(ptr %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
@@ -771,7 +771,7 @@ define <vscale x 8 x float> @intrinsic_vlse_v_nxv8f32_nxv8f32(<vscale x 8 x floa
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vlse.nxv8f32(
     <vscale x 8 x float> undef,
-    <vscale x 8 x float>* %0,
+    ptr %0,
     iXLen %1,
     iXLen %2)
 
@@ -780,13 +780,13 @@ entry:
 
 declare <vscale x 8 x float> @llvm.riscv.vlse.mask.nxv8f32(
   <vscale x 8 x float>,
-  <vscale x 8 x float>*,
+  ptr,
   iXLen,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x float> @intrinsic_vlse_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vlse_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
@@ -795,7 +795,7 @@ define <vscale x 8 x float> @intrinsic_vlse_mask_v_nxv8f32_nxv8f32(<vscale x 8 x
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vlse.mask.nxv8f32(
     <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)
@@ -805,11 +805,11 @@ entry:
 
 declare <vscale x 16 x float> @llvm.riscv.vlse.nxv16f32(
   <vscale x 16 x float>,
-  <vscale x 16 x float>*,
+  ptr,
   iXLen,
   iXLen);
 
-define <vscale x 16 x float> @intrinsic_vlse_v_nxv16f32_nxv16f32(<vscale x 16 x float>* %0, iXLen %1, iXLen %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vlse_v_nxv16f32_nxv16f32(ptr %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
@@ -818,7 +818,7 @@ define <vscale x 16 x float> @intrinsic_vlse_v_nxv16f32_nxv16f32(<vscale x 16 x
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vlse.nxv16f32(
     <vscale x 16 x float> undef,
-    <vscale x 16 x float>* %0,
+    ptr %0,
     iXLen %1,
     iXLen %2)
 
@@ -827,13 +827,13 @@ entry:
 
 declare <vscale x 16 x float> @llvm.riscv.vlse.mask.nxv16f32(
   <vscale x 16 x float>,
-  <vscale x 16 x float>*,
+  ptr,
   iXLen,
   <vscale x 16 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 16 x float> @intrinsic_vlse_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vlse_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, ptr %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
@@ -842,7 +842,7 @@ define <vscale x 16 x float> @intrinsic_vlse_mask_v_nxv16f32_nxv16f32(<vscale x
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vlse.mask.nxv16f32(
     <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 16 x i1> %3,
     iXLen %4, iXLen 1)
@@ -852,11 +852,11 @@ entry:
 
 declare <vscale x 1 x i16> @llvm.riscv.vlse.nxv1i16(
   <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
+  ptr,
   iXLen,
   iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vlse_v_nxv1i16_nxv1i16(<vscale x 1 x i16>* %0, iXLen %1, iXLen %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vlse_v_nxv1i16_nxv1i16(ptr %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
@@ -865,7 +865,7 @@ define <vscale x 1 x i16> @intrinsic_vlse_v_nxv1i16_nxv1i16(<vscale x 1 x i16>*
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vlse.nxv1i16(
     <vscale x 1 x i16> undef,
-    <vscale x 1 x i16>* %0,
+    ptr %0,
     iXLen %1,
     iXLen %2)
 
@@ -874,13 +874,13 @@ entry:
 
 declare <vscale x 1 x i16> @llvm.riscv.vlse.mask.nxv1i16(
   <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
+  ptr,
   iXLen,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vlse_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vlse_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
@@ -889,7 +889,7 @@ define <vscale x 1 x i16> @intrinsic_vlse_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vlse.mask.nxv1i16(
     <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -899,11 +899,11 @@ entry:
 
 declare <vscale x 2 x i16> @llvm.riscv.vlse.nxv2i16(
   <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
+  ptr,
   iXLen,
   iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vlse_v_nxv2i16_nxv2i16(<vscale x 2 x i16>* %0, iXLen %1, iXLen %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vlse_v_nxv2i16_nxv2i16(ptr %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
@@ -912,7 +912,7 @@ define <vscale x 2 x i16> @intrinsic_vlse_v_nxv2i16_nxv2i16(<vscale x 2 x i16>*
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vlse.nxv2i16(
     <vscale x 2 x i16> undef,
-    <vscale x 2 x i16>* %0,
+    ptr %0,
     iXLen %1,
     iXLen %2)
 
@@ -921,13 +921,13 @@ entry:
 
 declare <vscale x 2 x i16> @llvm.riscv.vlse.mask.nxv2i16(
   <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
+  ptr,
   iXLen,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vlse_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vlse_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
@@ -936,7 +936,7 @@ define <vscale x 2 x i16> @intrinsic_vlse_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vlse.mask.nxv2i16(
     <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -946,11 +946,11 @@ entry:
 
 declare <vscale x 4 x i16> @llvm.riscv.vlse.nxv4i16(
   <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
+  ptr,
   iXLen,
   iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vlse_v_nxv4i16_nxv4i16(<vscale x 4 x i16>* %0, iXLen %1, iXLen %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vlse_v_nxv4i16_nxv4i16(ptr %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
@@ -959,7 +959,7 @@ define <vscale x 4 x i16> @intrinsic_vlse_v_nxv4i16_nxv4i16(<vscale x 4 x i16>*
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vlse.nxv4i16(
     <vscale x 4 x i16> undef,
-    <vscale x 4 x i16>* %0,
+    ptr %0,
     iXLen %1,
     iXLen %2)
 
@@ -968,13 +968,13 @@ entry:
 
 declare <vscale x 4 x i16> @llvm.riscv.vlse.mask.nxv4i16(
   <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
+  ptr,
   iXLen,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vlse_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vlse_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
@@ -983,7 +983,7 @@ define <vscale x 4 x i16> @intrinsic_vlse_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vlse.mask.nxv4i16(
     <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -993,11 +993,11 @@ entry:
 
 declare <vscale x 8 x i16> @llvm.riscv.vlse.nxv8i16(
   <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
+  ptr,
   iXLen,
   iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vlse_v_nxv8i16_nxv8i16(<vscale x 8 x i16>* %0, iXLen %1, iXLen %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vlse_v_nxv8i16_nxv8i16(ptr %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
@@ -1006,7 +1006,7 @@ define <vscale x 8 x i16> @intrinsic_vlse_v_nxv8i16_nxv8i16(<vscale x 8 x i16>*
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vlse.nxv8i16(
     <vscale x 8 x i16> undef,
-    <vscale x 8 x i16>* %0,
+    ptr %0,
     iXLen %1,
     iXLen %2)
 
@@ -1015,13 +1015,13 @@ entry:
 
 declare <vscale x 8 x i16> @llvm.riscv.vlse.mask.nxv8i16(
   <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
+  ptr,
   iXLen,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vlse_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vlse_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
@@ -1030,7 +1030,7 @@ define <vscale x 8 x i16> @intrinsic_vlse_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vlse.mask.nxv8i16(
     <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1040,11 +1040,11 @@ entry:
 
 declare <vscale x 16 x i16> @llvm.riscv.vlse.nxv16i16(
   <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
+  ptr,
   iXLen,
   iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vlse_v_nxv16i16_nxv16i16(<vscale x 16 x i16>* %0, iXLen %1, iXLen %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vlse_v_nxv16i16_nxv16i16(ptr %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
@@ -1053,7 +1053,7 @@ define <vscale x 16 x i16> @intrinsic_vlse_v_nxv16i16_nxv16i16(<vscale x 16 x i1
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vlse.nxv16i16(
     <vscale x 16 x i16> undef,
-    <vscale x 16 x i16>* %0,
+    ptr %0,
     iXLen %1,
     iXLen %2)
 
@@ -1062,13 +1062,13 @@ entry:
 
 declare <vscale x 16 x i16> @llvm.riscv.vlse.mask.nxv16i16(
   <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
+  ptr,
   iXLen,
   <vscale x 16 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vlse_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vlse_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
@@ -1077,7 +1077,7 @@ define <vscale x 16 x i16> @intrinsic_vlse_mask_v_nxv16i16_nxv16i16(<vscale x 16
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vlse.mask.nxv16i16(
     <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 16 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1087,11 +1087,11 @@ entry:
 
 declare <vscale x 32 x i16> @llvm.riscv.vlse.nxv32i16(
   <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
+  ptr,
   iXLen,
   iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vlse_v_nxv32i16_nxv32i16(<vscale x 32 x i16>* %0, iXLen %1, iXLen %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vlse_v_nxv32i16_nxv32i16(ptr %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, ma
@@ -1100,7 +1100,7 @@ define <vscale x 32 x i16> @intrinsic_vlse_v_nxv32i16_nxv32i16(<vscale x 32 x i1
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vlse.nxv32i16(
     <vscale x 32 x i16> undef,
-    <vscale x 32 x i16>* %0,
+    ptr %0,
     iXLen %1,
     iXLen %2)
 
@@ -1109,13 +1109,13 @@ entry:
 
 declare <vscale x 32 x i16> @llvm.riscv.vlse.mask.nxv32i16(
   <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
+  ptr,
   iXLen,
   <vscale x 32 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vlse_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vlse_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, mu
@@ -1124,7 +1124,7 @@ define <vscale x 32 x i16> @intrinsic_vlse_mask_v_nxv32i16_nxv32i16(<vscale x 32
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vlse.mask.nxv32i16(
     <vscale x 32 x i16> %0,
-    <vscale x 32 x i16>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 32 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1134,11 +1134,11 @@ entry:
 
 declare <vscale x 1 x half> @llvm.riscv.vlse.nxv1f16(
   <vscale x 1 x half>,
-  <vscale x 1 x half>*,
+  ptr,
   iXLen,
   iXLen);
 
-define <vscale x 1 x half> @intrinsic_vlse_v_nxv1f16_nxv1f16(<vscale x 1 x half>* %0, iXLen %1, iXLen %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vlse_v_nxv1f16_nxv1f16(ptr %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
@@ -1147,7 +1147,7 @@ define <vscale x 1 x half> @intrinsic_vlse_v_nxv1f16_nxv1f16(<vscale x 1 x half>
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vlse.nxv1f16(
     <vscale x 1 x half> undef,
-    <vscale x 1 x half>* %0,
+    ptr %0,
     iXLen %1,
     iXLen %2)
 
@@ -1156,13 +1156,13 @@ entry:
 
 declare <vscale x 1 x half> @llvm.riscv.vlse.mask.nxv1f16(
   <vscale x 1 x half>,
-  <vscale x 1 x half>*,
+  ptr,
   iXLen,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x half> @intrinsic_vlse_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vlse_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
@@ -1171,7 +1171,7 @@ define <vscale x 1 x half> @intrinsic_vlse_mask_v_nxv1f16_nxv1f16(<vscale x 1 x
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vlse.mask.nxv1f16(
     <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1181,11 +1181,11 @@ entry:
 
 declare <vscale x 2 x half> @llvm.riscv.vlse.nxv2f16(
   <vscale x 2 x half>,
-  <vscale x 2 x half>*,
+  ptr,
   iXLen,
   iXLen);
 
-define <vscale x 2 x half> @intrinsic_vlse_v_nxv2f16_nxv2f16(<vscale x 2 x half>* %0, iXLen %1, iXLen %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vlse_v_nxv2f16_nxv2f16(ptr %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
@@ -1194,7 +1194,7 @@ define <vscale x 2 x half> @intrinsic_vlse_v_nxv2f16_nxv2f16(<vscale x 2 x half>
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vlse.nxv2f16(
     <vscale x 2 x half> undef,
-    <vscale x 2 x half>* %0,
+    ptr %0,
     iXLen %1,
     iXLen %2)
 
@@ -1203,13 +1203,13 @@ entry:
 
 declare <vscale x 2 x half> @llvm.riscv.vlse.mask.nxv2f16(
   <vscale x 2 x half>,
-  <vscale x 2 x half>*,
+  ptr,
   iXLen,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x half> @intrinsic_vlse_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vlse_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
@@ -1218,7 +1218,7 @@ define <vscale x 2 x half> @intrinsic_vlse_mask_v_nxv2f16_nxv2f16(<vscale x 2 x
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vlse.mask.nxv2f16(
     <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1228,11 +1228,11 @@ entry:
 
 declare <vscale x 4 x half> @llvm.riscv.vlse.nxv4f16(
   <vscale x 4 x half>,
-  <vscale x 4 x half>*,
+  ptr,
   iXLen,
   iXLen);
 
-define <vscale x 4 x half> @intrinsic_vlse_v_nxv4f16_nxv4f16(<vscale x 4 x half>* %0, iXLen %1, iXLen %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vlse_v_nxv4f16_nxv4f16(ptr %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
@@ -1241,7 +1241,7 @@ define <vscale x 4 x half> @intrinsic_vlse_v_nxv4f16_nxv4f16(<vscale x 4 x half>
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vlse.nxv4f16(
     <vscale x 4 x half> undef,
-    <vscale x 4 x half>* %0,
+    ptr %0,
     iXLen %1,
     iXLen %2)
 
@@ -1250,13 +1250,13 @@ entry:
 
 declare <vscale x 4 x half> @llvm.riscv.vlse.mask.nxv4f16(
   <vscale x 4 x half>,
-  <vscale x 4 x half>*,
+  ptr,
   iXLen,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x half> @intrinsic_vlse_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vlse_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
@@ -1265,7 +1265,7 @@ define <vscale x 4 x half> @intrinsic_vlse_mask_v_nxv4f16_nxv4f16(<vscale x 4 x
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vlse.mask.nxv4f16(
     <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1275,11 +1275,11 @@ entry:
 
 declare <vscale x 8 x half> @llvm.riscv.vlse.nxv8f16(
   <vscale x 8 x half>,
-  <vscale x 8 x half>*,
+  ptr,
   iXLen,
   iXLen);
 
-define <vscale x 8 x half> @intrinsic_vlse_v_nxv8f16_nxv8f16(<vscale x 8 x half>* %0, iXLen %1, iXLen %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vlse_v_nxv8f16_nxv8f16(ptr %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
@@ -1288,7 +1288,7 @@ define <vscale x 8 x half> @intrinsic_vlse_v_nxv8f16_nxv8f16(<vscale x 8 x half>
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vlse.nxv8f16(
     <vscale x 8 x half> undef,
-    <vscale x 8 x half>* %0,
+    ptr %0,
     iXLen %1,
     iXLen %2)
 
@@ -1297,13 +1297,13 @@ entry:
 
 declare <vscale x 8 x half> @llvm.riscv.vlse.mask.nxv8f16(
   <vscale x 8 x half>,
-  <vscale x 8 x half>*,
+  ptr,
   iXLen,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x half> @intrinsic_vlse_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vlse_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
@@ -1312,7 +1312,7 @@ define <vscale x 8 x half> @intrinsic_vlse_mask_v_nxv8f16_nxv8f16(<vscale x 8 x
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vlse.mask.nxv8f16(
     <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1322,11 +1322,11 @@ entry:
 
 declare <vscale x 16 x half> @llvm.riscv.vlse.nxv16f16(
   <vscale x 16 x half>,
-  <vscale x 16 x half>*,
+  ptr,
   iXLen,
   iXLen);
 
-define <vscale x 16 x half> @intrinsic_vlse_v_nxv16f16_nxv16f16(<vscale x 16 x half>* %0, iXLen %1, iXLen %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vlse_v_nxv16f16_nxv16f16(ptr %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
@@ -1335,7 +1335,7 @@ define <vscale x 16 x half> @intrinsic_vlse_v_nxv16f16_nxv16f16(<vscale x 16 x h
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vlse.nxv16f16(
     <vscale x 16 x half> undef,
-    <vscale x 16 x half>* %0,
+    ptr %0,
     iXLen %1,
     iXLen %2)
 
@@ -1344,13 +1344,13 @@ entry:
 
 declare <vscale x 16 x half> @llvm.riscv.vlse.mask.nxv16f16(
   <vscale x 16 x half>,
-  <vscale x 16 x half>*,
+  ptr,
   iXLen,
   <vscale x 16 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 16 x half> @intrinsic_vlse_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vlse_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, ptr %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
@@ -1359,7 +1359,7 @@ define <vscale x 16 x half> @intrinsic_vlse_mask_v_nxv16f16_nxv16f16(<vscale x 1
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vlse.mask.nxv16f16(
     <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 16 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1369,11 +1369,11 @@ entry:
 
 declare <vscale x 32 x half> @llvm.riscv.vlse.nxv32f16(
   <vscale x 32 x half>,
-  <vscale x 32 x half>*,
+  ptr,
   iXLen,
   iXLen);
 
-define <vscale x 32 x half> @intrinsic_vlse_v_nxv32f16_nxv32f16(<vscale x 32 x half>* %0, iXLen %1, iXLen %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vlse_v_nxv32f16_nxv32f16(ptr %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, ma
@@ -1382,7 +1382,7 @@ define <vscale x 32 x half> @intrinsic_vlse_v_nxv32f16_nxv32f16(<vscale x 32 x h
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vlse.nxv32f16(
     <vscale x 32 x half> undef,
-    <vscale x 32 x half>* %0,
+    ptr %0,
     iXLen %1,
     iXLen %2)
 
@@ -1391,13 +1391,13 @@ entry:
 
 declare <vscale x 32 x half> @llvm.riscv.vlse.mask.nxv32f16(
   <vscale x 32 x half>,
-  <vscale x 32 x half>*,
+  ptr,
   iXLen,
   <vscale x 32 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 32 x half> @intrinsic_vlse_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+define <vscale x 32 x half> @intrinsic_vlse_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, ptr %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, mu
@@ -1406,7 +1406,7 @@ define <vscale x 32 x half> @intrinsic_vlse_mask_v_nxv32f16_nxv32f16(<vscale x 3
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vlse.mask.nxv32f16(
     <vscale x 32 x half> %0,
-    <vscale x 32 x half>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 32 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1416,11 +1416,11 @@ entry:
 
 declare <vscale x 1 x i8> @llvm.riscv.vlse.nxv1i8(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   iXLen,
   iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vlse_v_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0, iXLen %1, iXLen %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vlse_v_nxv1i8_nxv1i8(ptr %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
@@ -1429,7 +1429,7 @@ define <vscale x 1 x i8> @intrinsic_vlse_v_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0,
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vlse.nxv1i8(
     <vscale x 1 x i8> undef,
-    <vscale x 1 x i8>* %0,
+    ptr %0,
     iXLen %1,
     iXLen %2)
 
@@ -1438,13 +1438,13 @@ entry:
 
 declare <vscale x 1 x i8> @llvm.riscv.vlse.mask.nxv1i8(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   iXLen,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vlse_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vlse_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
@@ -1453,7 +1453,7 @@ define <vscale x 1 x i8> @intrinsic_vlse_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8>
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vlse.mask.nxv1i8(
     <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1463,11 +1463,11 @@ entry:
 
 declare <vscale x 2 x i8> @llvm.riscv.vlse.nxv2i8(
   <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
+  ptr,
   iXLen,
   iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vlse_v_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0, iXLen %1, iXLen %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vlse_v_nxv2i8_nxv2i8(ptr %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
@@ -1476,7 +1476,7 @@ define <vscale x 2 x i8> @intrinsic_vlse_v_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0,
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vlse.nxv2i8(
     <vscale x 2 x i8> undef,
-    <vscale x 2 x i8>* %0,
+    ptr %0,
     iXLen %1,
     iXLen %2)
 
@@ -1485,13 +1485,13 @@ entry:
 
 declare <vscale x 2 x i8> @llvm.riscv.vlse.mask.nxv2i8(
   <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
+  ptr,
   iXLen,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vlse_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vlse_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, mu
@@ -1500,7 +1500,7 @@ define <vscale x 2 x i8> @intrinsic_vlse_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8>
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vlse.mask.nxv2i8(
     <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1510,11 +1510,11 @@ entry:
 
 declare <vscale x 4 x i8> @llvm.riscv.vlse.nxv4i8(
   <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
+  ptr,
   iXLen,
   iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vlse_v_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0, iXLen %1, iXLen %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vlse_v_nxv4i8_nxv4i8(ptr %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
@@ -1523,7 +1523,7 @@ define <vscale x 4 x i8> @intrinsic_vlse_v_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0,
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vlse.nxv4i8(
     <vscale x 4 x i8> undef,
-    <vscale x 4 x i8>* %0,
+    ptr %0,
     iXLen %1,
     iXLen %2)
 
@@ -1532,13 +1532,13 @@ entry:
 
 declare <vscale x 4 x i8> @llvm.riscv.vlse.mask.nxv4i8(
   <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
+  ptr,
   iXLen,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vlse_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vlse_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, mu
@@ -1547,7 +1547,7 @@ define <vscale x 4 x i8> @intrinsic_vlse_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8>
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vlse.mask.nxv4i8(
     <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1557,11 +1557,11 @@ entry:
 
 declare <vscale x 8 x i8> @llvm.riscv.vlse.nxv8i8(
   <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
+  ptr,
   iXLen,
   iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vlse_v_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0, iXLen %1, iXLen %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vlse_v_nxv8i8_nxv8i8(ptr %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, ma
@@ -1570,7 +1570,7 @@ define <vscale x 8 x i8> @intrinsic_vlse_v_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0,
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vlse.nxv8i8(
     <vscale x 8 x i8> undef,
-    <vscale x 8 x i8>* %0,
+    ptr %0,
     iXLen %1,
     iXLen %2)
 
@@ -1579,13 +1579,13 @@ entry:
 
 declare <vscale x 8 x i8> @llvm.riscv.vlse.mask.nxv8i8(
   <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
+  ptr,
   iXLen,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vlse_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vlse_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, mu
@@ -1594,7 +1594,7 @@ define <vscale x 8 x i8> @intrinsic_vlse_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8>
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vlse.mask.nxv8i8(
     <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1604,11 +1604,11 @@ entry:
 
 declare <vscale x 16 x i8> @llvm.riscv.vlse.nxv16i8(
   <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
+  ptr,
   iXLen,
   iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vlse_v_nxv16i8_nxv16i8(<vscale x 16 x i8>* %0, iXLen %1, iXLen %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vlse_v_nxv16i8_nxv16i8(ptr %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
@@ -1617,7 +1617,7 @@ define <vscale x 16 x i8> @intrinsic_vlse_v_nxv16i8_nxv16i8(<vscale x 16 x i8>*
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vlse.nxv16i8(
     <vscale x 16 x i8> undef,
-    <vscale x 16 x i8>* %0,
+    ptr %0,
     iXLen %1,
     iXLen %2)
 
@@ -1626,13 +1626,13 @@ entry:
 
 declare <vscale x 16 x i8> @llvm.riscv.vlse.mask.nxv16i8(
   <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
+  ptr,
   iXLen,
   <vscale x 16 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vlse_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vlse_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
@@ -1641,7 +1641,7 @@ define <vscale x 16 x i8> @intrinsic_vlse_mask_v_nxv16i8_nxv16i8(<vscale x 16 x
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vlse.mask.nxv16i8(
     <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 16 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1651,11 +1651,11 @@ entry:
 
 declare <vscale x 32 x i8> @llvm.riscv.vlse.nxv32i8(
   <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
+  ptr,
   iXLen,
   iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vlse_v_nxv32i8_nxv32i8(<vscale x 32 x i8>* %0, iXLen %1, iXLen %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vlse_v_nxv32i8_nxv32i8(ptr %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
@@ -1664,7 +1664,7 @@ define <vscale x 32 x i8> @intrinsic_vlse_v_nxv32i8_nxv32i8(<vscale x 32 x i8>*
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vlse.nxv32i8(
     <vscale x 32 x i8> undef,
-    <vscale x 32 x i8>* %0,
+    ptr %0,
     iXLen %1,
     iXLen %2)
 
@@ -1673,13 +1673,13 @@ entry:
 
 declare <vscale x 32 x i8> @llvm.riscv.vlse.mask.nxv32i8(
   <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
+  ptr,
   iXLen,
   <vscale x 32 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vlse_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vlse_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
@@ -1688,7 +1688,7 @@ define <vscale x 32 x i8> @intrinsic_vlse_mask_v_nxv32i8_nxv32i8(<vscale x 32 x
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vlse.mask.nxv32i8(
     <vscale x 32 x i8> %0,
-    <vscale x 32 x i8>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 32 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1698,11 +1698,11 @@ entry:
 
 declare <vscale x 64 x i8> @llvm.riscv.vlse.nxv64i8(
   <vscale x 64 x i8>,
-  <vscale x 64 x i8>*,
+  ptr,
   iXLen,
   iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vlse_v_nxv64i8_nxv64i8(<vscale x 64 x i8>* %0, iXLen %1, iXLen %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vlse_v_nxv64i8_nxv64i8(ptr %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
@@ -1711,7 +1711,7 @@ define <vscale x 64 x i8> @intrinsic_vlse_v_nxv64i8_nxv64i8(<vscale x 64 x i8>*
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vlse.nxv64i8(
     <vscale x 64 x i8> undef,
-    <vscale x 64 x i8>* %0,
+    ptr %0,
     iXLen %1,
     iXLen %2)
 
@@ -1720,13 +1720,13 @@ entry:
 
 declare <vscale x 64 x i8> @llvm.riscv.vlse.mask.nxv64i8(
   <vscale x 64 x i8>,
-  <vscale x 64 x i8>*,
+  ptr,
   iXLen,
   <vscale x 64 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vlse_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, iXLen %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vlse_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, iXLen %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, mu
@@ -1735,7 +1735,7 @@ define <vscale x 64 x i8> @intrinsic_vlse_mask_v_nxv64i8_nxv64i8(<vscale x 64 x
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vlse.mask.nxv64i8(
     <vscale x 64 x i8> %0,
-    <vscale x 64 x i8>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 64 x i1> %3,
     iXLen %4, iXLen 1)

diff --git a/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll
index b80a8a6f8eade..93c821b5357c8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll
@@ -6,11 +6,11 @@
 
 declare <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i64(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   <vscale x 1 x i64>,
   i64);
 
-define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8>* %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -20,7 +20,7 @@ define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i64(
     <vscale x 1 x i8> undef,
-    <vscale x 1 x i8>* %0,
+    ptr %0,
     <vscale x 1 x i64> %1,
     i64 %2)
 
@@ -29,13 +29,13 @@ entry:
 
 declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
   i64,
   i64);
 
-define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -44,7 +44,7 @@ define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64(
     <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
+    ptr %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
     i64 %4, i64 1)
@@ -54,11 +54,11 @@ entry:
 
 declare <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i64(
   <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
+  ptr,
   <vscale x 2 x i64>,
   i64);
 
-define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8>* %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -68,7 +68,7 @@ define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i64(
     <vscale x 2 x i8> undef,
-    <vscale x 2 x i8>* %0,
+    ptr %0,
     <vscale x 2 x i64> %1,
     i64 %2)
 
@@ -77,13 +77,13 @@ entry:
 
 declare <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64(
   <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
+  ptr,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
   i64,
   i64);
 
-define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -92,7 +92,7 @@ define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64(
     <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
+    ptr %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
     i64 %4, i64 1)
@@ -102,11 +102,11 @@ entry:
 
 declare <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i64(
   <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
+  ptr,
   <vscale x 4 x i64>,
   i64);
 
-define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8>* %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -116,7 +116,7 @@ define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i64(
     <vscale x 4 x i8> undef,
-    <vscale x 4 x i8>* %0,
+    ptr %0,
     <vscale x 4 x i64> %1,
     i64 %2)
 
@@ -125,13 +125,13 @@ entry:
 
 declare <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64(
   <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
+  ptr,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
   i64,
   i64);
 
-define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -140,7 +140,7 @@ define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64(
     <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
+    ptr %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
     i64 %4, i64 1)
@@ -150,11 +150,11 @@ entry:
 
 declare <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i64(
   <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
+  ptr,
   <vscale x 8 x i64>,
   i64);
 
-define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8>* %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -164,7 +164,7 @@ define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i64(
     <vscale x 8 x i8> undef,
-    <vscale x 8 x i8>* %0,
+    ptr %0,
     <vscale x 8 x i64> %1,
     i64 %2)
 
@@ -173,13 +173,13 @@ entry:
 
 declare <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64(
   <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
+  ptr,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
   i64,
   i64);
 
-define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -188,7 +188,7 @@ define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64(
     <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
+    ptr %1,
     <vscale x 8 x i64> %2,
     <vscale x 8 x i1> %3,
     i64 %4, i64 1)
@@ -198,11 +198,11 @@ entry:
 
 declare <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i64(
   <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
+  ptr,
   <vscale x 1 x i64>,
   i64);
 
-define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16>* %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -212,7 +212,7 @@ define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i64(<vscale x
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i64(
     <vscale x 1 x i16> undef,
-    <vscale x 1 x i16>* %0,
+    ptr %0,
     <vscale x 1 x i64> %1,
     i64 %2)
 
@@ -221,13 +221,13 @@ entry:
 
 declare <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64(
   <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
+  ptr,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
   i64,
   i64);
 
-define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -236,7 +236,7 @@ define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i64(<vsca
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64(
     <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
+    ptr %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
     i64 %4, i64 1)
@@ -246,11 +246,11 @@ entry:
 
 declare <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i64(
   <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
+  ptr,
   <vscale x 2 x i64>,
   i64);
 
-define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16>* %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -260,7 +260,7 @@ define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i64(<vscale x
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i64(
     <vscale x 2 x i16> undef,
-    <vscale x 2 x i16>* %0,
+    ptr %0,
     <vscale x 2 x i64> %1,
     i64 %2)
 
@@ -269,13 +269,13 @@ entry:
 
 declare <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64(
   <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
+  ptr,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
   i64,
   i64);
 
-define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -284,7 +284,7 @@ define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i64(<vsca
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64(
     <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
+    ptr %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
     i64 %4, i64 1)
@@ -294,11 +294,11 @@ entry:
 
 declare <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i64(
   <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
+  ptr,
   <vscale x 4 x i64>,
   i64);
 
-define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16>* %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -308,7 +308,7 @@ define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i64(<vscale x
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i64(
     <vscale x 4 x i16> undef,
-    <vscale x 4 x i16>* %0,
+    ptr %0,
     <vscale x 4 x i64> %1,
     i64 %2)
 
@@ -317,13 +317,13 @@ entry:
 
 declare <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64(
   <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
+  ptr,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
   i64,
   i64);
 
-define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -332,7 +332,7 @@ define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i64(<vsca
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64(
     <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
+    ptr %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
     i64 %4, i64 1)
@@ -342,11 +342,11 @@ entry:
 
 declare <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i64(
   <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
+  ptr,
   <vscale x 8 x i64>,
   i64);
 
-define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16>* %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -356,7 +356,7 @@ define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i64(<vscale x
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i64(
     <vscale x 8 x i16> undef,
-    <vscale x 8 x i16>* %0,
+    ptr %0,
     <vscale x 8 x i64> %1,
     i64 %2)
 
@@ -365,13 +365,13 @@ entry:
 
 declare <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64(
   <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
+  ptr,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
   i64,
   i64);
 
-define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -380,7 +380,7 @@ define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i64(<vsca
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64(
     <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
+    ptr %1,
     <vscale x 8 x i64> %2,
     <vscale x 8 x i1> %3,
     i64 %4, i64 1)
@@ -390,11 +390,11 @@ entry:
 
 declare <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i64(
   <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
+  ptr,
   <vscale x 1 x i64>,
   i64);
 
-define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32>* %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -404,7 +404,7 @@ define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i64(<vscale x
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i64(
     <vscale x 1 x i32> undef,
-    <vscale x 1 x i32>* %0,
+    ptr %0,
     <vscale x 1 x i64> %1,
     i64 %2)
 
@@ -413,13 +413,13 @@ entry:
 
 declare <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64(
   <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
+  ptr,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
   i64,
   i64);
 
-define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -428,7 +428,7 @@ define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i64(<vsca
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
+    ptr %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
     i64 %4, i64 1)
@@ -438,11 +438,11 @@ entry:
 
 declare <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64(
   <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
+  ptr,
   <vscale x 2 x i64>,
   i64);
 
-define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32>* %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -452,7 +452,7 @@ define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i64(<vscale x
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64(
     <vscale x 2 x i32> undef,
-    <vscale x 2 x i32>* %0,
+    ptr %0,
     <vscale x 2 x i64> %1,
     i64 %2)
 
@@ -461,13 +461,13 @@ entry:
 
 declare <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64(
   <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
+  ptr,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
   i64,
   i64);
 
-define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -476,7 +476,7 @@ define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i64(<vsca
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
+    ptr %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
     i64 %4, i64 1)
@@ -486,11 +486,11 @@ entry:
 
 declare <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i64(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   <vscale x 4 x i64>,
   i64);
 
-define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32>* %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -500,7 +500,7 @@ define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i64(<vscale x
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i64(
     <vscale x 4 x i32> undef,
-    <vscale x 4 x i32>* %0,
+    ptr %0,
     <vscale x 4 x i64> %1,
     i64 %2)
 
@@ -509,13 +509,13 @@ entry:
 
 declare <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
   i64,
   i64);
 
-define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -524,7 +524,7 @@ define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i64(<vsca
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
+    ptr %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
     i64 %4, i64 1)
@@ -534,11 +534,11 @@ entry:
 
 declare <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i64(
   <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
+  ptr,
   <vscale x 8 x i64>,
   i64);
 
-define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32>* %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -548,7 +548,7 @@ define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i64(<vscale x
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i64(
     <vscale x 8 x i32> undef,
-    <vscale x 8 x i32>* %0,
+    ptr %0,
     <vscale x 8 x i64> %1,
     i64 %2)
 
@@ -557,13 +557,13 @@ entry:
 
 declare <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64(
   <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
+  ptr,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
   i64,
   i64);
 
-define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -572,7 +572,7 @@ define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i64(<vsca
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
+    ptr %1,
     <vscale x 8 x i64> %2,
     <vscale x 8 x i1> %3,
     i64 %4, i64 1)
@@ -582,11 +582,11 @@ entry:
 
 declare <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   <vscale x 1 x i64>,
   i64);
 
-define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64>* %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -595,7 +595,7 @@ define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i64(<vscale x
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i64(
     <vscale x 1 x i64> undef,
-    <vscale x 1 x i64>* %0,
+    ptr %0,
     <vscale x 1 x i64> %1,
     i64 %2)
 
@@ -604,13 +604,13 @@ entry:
 
 declare <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
   i64,
   i64);
 
-define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -619,7 +619,7 @@ define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i64(<vsca
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
+    ptr %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
     i64 %4, i64 1)
@@ -629,11 +629,11 @@ entry:
 
 declare <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
+  ptr,
   <vscale x 2 x i64>,
   i64);
 
-define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64>* %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -642,7 +642,7 @@ define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i64(<vscale x
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i64(
     <vscale x 2 x i64> undef,
-    <vscale x 2 x i64>* %0,
+    ptr %0,
     <vscale x 2 x i64> %1,
     i64 %2)
 
@@ -651,13 +651,13 @@ entry:
 
 declare <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
+  ptr,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
   i64,
   i64);
 
-define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -666,7 +666,7 @@ define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i64(<vsca
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
+    ptr %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
     i64 %4, i64 1)
@@ -676,11 +676,11 @@ entry:
 
 declare <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
+  ptr,
   <vscale x 4 x i64>,
   i64);
 
-define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64>* %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -689,7 +689,7 @@ define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i64(<vscale x
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i64(
     <vscale x 4 x i64> undef,
-    <vscale x 4 x i64>* %0,
+    ptr %0,
     <vscale x 4 x i64> %1,
     i64 %2)
 
@@ -698,13 +698,13 @@ entry:
 
 declare <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
+  ptr,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
   i64,
   i64);
 
-define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -713,7 +713,7 @@ define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i64(<vsca
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
+    ptr %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
     i64 %4, i64 1)
@@ -723,11 +723,11 @@ entry:
 
 declare <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i64(
   <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
+  ptr,
   <vscale x 8 x i64>,
   i64);
 
-define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64>* %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -736,7 +736,7 @@ define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i64(<vscale x
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i64(
     <vscale x 8 x i64> undef,
-    <vscale x 8 x i64>* %0,
+    ptr %0,
     <vscale x 8 x i64> %1,
     i64 %2)
 
@@ -745,13 +745,13 @@ entry:
 
 declare <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64(
   <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
+  ptr,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
   i64,
   i64);
 
-define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -760,7 +760,7 @@ define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i64(<vsca
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
+    ptr %1,
     <vscale x 8 x i64> %2,
     <vscale x 8 x i1> %3,
     i64 %4, i64 1)
@@ -770,11 +770,11 @@ entry:
 
 declare <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i64(
   <vscale x 1 x half>,
-  <vscale x 1 x half>*,
+  ptr,
   <vscale x 1 x i64>,
   i64);
 
-define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half>* %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -784,7 +784,7 @@ define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i64(<vscale x
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i64(
     <vscale x 1 x half> undef,
-    <vscale x 1 x half>* %0,
+    ptr %0,
     <vscale x 1 x i64> %1,
     i64 %2)
 
@@ -793,13 +793,13 @@ entry:
 
 declare <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i64(
   <vscale x 1 x half>,
-  <vscale x 1 x half>*,
+  ptr,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
   i64,
   i64);
 
-define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -808,7 +808,7 @@ define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i64(<vsc
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i64(
     <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
+    ptr %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
     i64 %4, i64 1)
@@ -818,11 +818,11 @@ entry:
 
 declare <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i64(
   <vscale x 2 x half>,
-  <vscale x 2 x half>*,
+  ptr,
   <vscale x 2 x i64>,
   i64);
 
-define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half>* %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -832,7 +832,7 @@ define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i64(<vscale x
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i64(
     <vscale x 2 x half> undef,
-    <vscale x 2 x half>* %0,
+    ptr %0,
     <vscale x 2 x i64> %1,
     i64 %2)
 
@@ -841,13 +841,13 @@ entry:
 
 declare <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i64(
   <vscale x 2 x half>,
-  <vscale x 2 x half>*,
+  ptr,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
   i64,
   i64);
 
-define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -856,7 +856,7 @@ define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i64(<vsc
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i64(
     <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
+    ptr %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
     i64 %4, i64 1)
@@ -866,11 +866,11 @@ entry:
 
 declare <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i64(
   <vscale x 4 x half>,
-  <vscale x 4 x half>*,
+  ptr,
   <vscale x 4 x i64>,
   i64);
 
-define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half>* %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -880,7 +880,7 @@ define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i64(<vscale x
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i64(
     <vscale x 4 x half> undef,
-    <vscale x 4 x half>* %0,
+    ptr %0,
     <vscale x 4 x i64> %1,
     i64 %2)
 
@@ -889,13 +889,13 @@ entry:
 
 declare <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i64(
   <vscale x 4 x half>,
-  <vscale x 4 x half>*,
+  ptr,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
   i64,
   i64);
 
-define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -904,7 +904,7 @@ define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i64(<vsc
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i64(
     <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
+    ptr %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
     i64 %4, i64 1)
@@ -914,11 +914,11 @@ entry:
 
 declare <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i64(
   <vscale x 8 x half>,
-  <vscale x 8 x half>*,
+  ptr,
   <vscale x 8 x i64>,
   i64);
 
-define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half>* %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -928,7 +928,7 @@ define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i64(<vscale x
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i64(
     <vscale x 8 x half> undef,
-    <vscale x 8 x half>* %0,
+    ptr %0,
     <vscale x 8 x i64> %1,
     i64 %2)
 
@@ -937,13 +937,13 @@ entry:
 
 declare <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i64(
   <vscale x 8 x half>,
-  <vscale x 8 x half>*,
+  ptr,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
   i64,
   i64);
 
-define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -952,7 +952,7 @@ define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i64(<vsc
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i64(
     <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
+    ptr %1,
     <vscale x 8 x i64> %2,
     <vscale x 8 x i1> %3,
     i64 %4, i64 1)
@@ -962,11 +962,11 @@ entry:
 
 declare <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i64(
   <vscale x 1 x float>,
-  <vscale x 1 x float>*,
+  ptr,
   <vscale x 1 x i64>,
   i64);
 
-define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float>* %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -976,7 +976,7 @@ define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i64(<vscale
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i64(
     <vscale x 1 x float> undef,
-    <vscale x 1 x float>* %0,
+    ptr %0,
     <vscale x 1 x i64> %1,
     i64 %2)
 
@@ -985,13 +985,13 @@ entry:
 
 declare <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64(
   <vscale x 1 x float>,
-  <vscale x 1 x float>*,
+  ptr,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
   i64,
   i64);
 
-define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -1000,7 +1000,7 @@ define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i64(<vs
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64(
     <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
+    ptr %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
     i64 %4, i64 1)
@@ -1010,11 +1010,11 @@ entry:
 
 declare <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i64(
   <vscale x 2 x float>,
-  <vscale x 2 x float>*,
+  ptr,
   <vscale x 2 x i64>,
   i64);
 
-define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float>* %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1024,7 +1024,7 @@ define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i64(<vscale
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i64(
     <vscale x 2 x float> undef,
-    <vscale x 2 x float>* %0,
+    ptr %0,
     <vscale x 2 x i64> %1,
     i64 %2)
 
@@ -1033,13 +1033,13 @@ entry:
 
 declare <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64(
   <vscale x 2 x float>,
-  <vscale x 2 x float>*,
+  ptr,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
   i64,
   i64);
 
-define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -1048,7 +1048,7 @@ define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i64(<vs
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64(
     <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
+    ptr %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
     i64 %4, i64 1)
@@ -1058,11 +1058,11 @@ entry:
 
 declare <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i64(
   <vscale x 4 x float>,
-  <vscale x 4 x float>*,
+  ptr,
   <vscale x 4 x i64>,
   i64);
 
-define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float>* %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1072,7 +1072,7 @@ define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i64(<vscale
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i64(
     <vscale x 4 x float> undef,
-    <vscale x 4 x float>* %0,
+    ptr %0,
     <vscale x 4 x i64> %1,
     i64 %2)
 
@@ -1081,13 +1081,13 @@ entry:
 
 declare <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64(
   <vscale x 4 x float>,
-  <vscale x 4 x float>*,
+  ptr,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
   i64,
   i64);
 
-define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -1096,7 +1096,7 @@ define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i64(<vs
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64(
     <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
+    ptr %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
     i64 %4, i64 1)
@@ -1106,11 +1106,11 @@ entry:
 
 declare <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i64(
   <vscale x 8 x float>,
-  <vscale x 8 x float>*,
+  ptr,
   <vscale x 8 x i64>,
   i64);
 
-define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float>* %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1120,7 +1120,7 @@ define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i64(<vscale
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i64(
     <vscale x 8 x float> undef,
-    <vscale x 8 x float>* %0,
+    ptr %0,
     <vscale x 8 x i64> %1,
     i64 %2)
 
@@ -1129,13 +1129,13 @@ entry:
 
 declare <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64(
   <vscale x 8 x float>,
-  <vscale x 8 x float>*,
+  ptr,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
   i64,
   i64);
 
-define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -1144,7 +1144,7 @@ define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i64(<vs
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64(
     <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
+    ptr %1,
     <vscale x 8 x i64> %2,
     <vscale x 8 x i1> %3,
     i64 %4, i64 1)
@@ -1154,11 +1154,11 @@ entry:
 
 declare <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i64(
   <vscale x 1 x double>,
-  <vscale x 1 x double>*,
+  ptr,
   <vscale x 1 x i64>,
   i64);
 
-define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double>* %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -1167,7 +1167,7 @@ define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i64(<vscale
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i64(
     <vscale x 1 x double> undef,
-    <vscale x 1 x double>* %0,
+    ptr %0,
     <vscale x 1 x i64> %1,
     i64 %2)
 
@@ -1176,13 +1176,13 @@ entry:
 
 declare <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64(
   <vscale x 1 x double>,
-  <vscale x 1 x double>*,
+  ptr,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
   i64,
   i64);
 
-define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -1191,7 +1191,7 @@ define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i64(<v
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64(
     <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
+    ptr %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
     i64 %4, i64 1)
@@ -1201,11 +1201,11 @@ entry:
 
 declare <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i64(
   <vscale x 2 x double>,
-  <vscale x 2 x double>*,
+  ptr,
   <vscale x 2 x i64>,
   i64);
 
-define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double>* %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -1214,7 +1214,7 @@ define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i64(<vscale
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i64(
     <vscale x 2 x double> undef,
-    <vscale x 2 x double>* %0,
+    ptr %0,
     <vscale x 2 x i64> %1,
     i64 %2)
 
@@ -1223,13 +1223,13 @@ entry:
 
 declare <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64(
   <vscale x 2 x double>,
-  <vscale x 2 x double>*,
+  ptr,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
   i64,
   i64);
 
-define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -1238,7 +1238,7 @@ define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i64(<v
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64(
     <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
+    ptr %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
     i64 %4, i64 1)
@@ -1248,11 +1248,11 @@ entry:
 
 declare <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i64(
   <vscale x 4 x double>,
-  <vscale x 4 x double>*,
+  ptr,
   <vscale x 4 x i64>,
   i64);
 
-define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double>* %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -1261,7 +1261,7 @@ define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i64(<vscale
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i64(
     <vscale x 4 x double> undef,
-    <vscale x 4 x double>* %0,
+    ptr %0,
     <vscale x 4 x i64> %1,
     i64 %2)
 
@@ -1270,13 +1270,13 @@ entry:
 
 declare <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64(
   <vscale x 4 x double>,
-  <vscale x 4 x double>*,
+  ptr,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
   i64,
   i64);
 
-define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -1285,7 +1285,7 @@ define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i64(<v
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64(
     <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
+    ptr %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
     i64 %4, i64 1)
@@ -1295,11 +1295,11 @@ entry:
 
 declare <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i64(
   <vscale x 8 x double>,
-  <vscale x 8 x double>*,
+  ptr,
   <vscale x 8 x i64>,
   i64);
 
-define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double>* %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -1308,7 +1308,7 @@ define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i64(<vscale
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i64(
     <vscale x 8 x double> undef,
-    <vscale x 8 x double>* %0,
+    ptr %0,
     <vscale x 8 x i64> %1,
     i64 %2)
 
@@ -1317,13 +1317,13 @@ entry:
 
 declare <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64(
   <vscale x 8 x double>,
-  <vscale x 8 x double>*,
+  ptr,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
   i64,
   i64);
 
-define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -1332,7 +1332,7 @@ define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i64(<v
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64(
     <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
+    ptr %1,
     <vscale x 8 x i64> %2,
     <vscale x 8 x i1> %3,
     i64 %4, i64 1)

diff --git a/llvm/test/CodeGen/RISCV/rvv/vluxei.ll b/llvm/test/CodeGen/RISCV/rvv/vluxei.ll
index 8c53fb94a7341..679726fe66695 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vluxei.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vluxei.ll
@@ -6,11 +6,11 @@
 
 declare <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   <vscale x 1 x i32>,
   iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8>* %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -20,7 +20,7 @@ define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32(
     <vscale x 1 x i8> undef,
-    <vscale x 1 x i8>* %0,
+    ptr %0,
     <vscale x 1 x i32> %1,
     iXLen %2)
 
@@ -29,13 +29,13 @@ entry:
 
 declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -44,7 +44,7 @@ define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32(
     <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
+    ptr %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -54,11 +54,11 @@ entry:
 
 declare <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i32(
   <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
+  ptr,
   <vscale x 2 x i32>,
   iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8>* %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -68,7 +68,7 @@ define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i32(
     <vscale x 2 x i8> undef,
-    <vscale x 2 x i8>* %0,
+    ptr %0,
     <vscale x 2 x i32> %1,
     iXLen %2)
 
@@ -77,13 +77,13 @@ entry:
 
 declare <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32(
   <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
+  ptr,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -92,7 +92,7 @@ define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32(
     <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
+    ptr %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -102,11 +102,11 @@ entry:
 
 declare <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i32(
   <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
+  ptr,
   <vscale x 4 x i32>,
   iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8>* %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -116,7 +116,7 @@ define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i32(
     <vscale x 4 x i8> undef,
-    <vscale x 4 x i8>* %0,
+    ptr %0,
     <vscale x 4 x i32> %1,
     iXLen %2)
 
@@ -125,13 +125,13 @@ entry:
 
 declare <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32(
   <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
+  ptr,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -140,7 +140,7 @@ define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32(
     <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
+    ptr %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -150,11 +150,11 @@ entry:
 
 declare <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i32(
   <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
+  ptr,
   <vscale x 8 x i32>,
   iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8>* %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -164,7 +164,7 @@ define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i32(
     <vscale x 8 x i8> undef,
-    <vscale x 8 x i8>* %0,
+    ptr %0,
     <vscale x 8 x i32> %1,
     iXLen %2)
 
@@ -173,13 +173,13 @@ entry:
 
 declare <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32(
   <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
+  ptr,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -188,7 +188,7 @@ define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32(
     <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
+    ptr %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)
@@ -198,11 +198,11 @@ entry:
 
 declare <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i32(
   <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
+  ptr,
   <vscale x 16 x i32>,
   iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8>* %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -212,7 +212,7 @@ define <vscale x 16 x i8> @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i32(<vscale x
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i32(
     <vscale x 16 x i8> undef,
-    <vscale x 16 x i8>* %0,
+    ptr %0,
     <vscale x 16 x i32> %1,
     iXLen %2)
 
@@ -221,13 +221,13 @@ entry:
 
 declare <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32(
   <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
+  ptr,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -236,7 +236,7 @@ define <vscale x 16 x i8> @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vsc
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32(
     <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
+    ptr %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
     iXLen %4, iXLen 1)
@@ -246,11 +246,11 @@ entry:
 
 declare <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i32(
   <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
+  ptr,
   <vscale x 1 x i32>,
   iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16>* %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -260,7 +260,7 @@ define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i32(<vscale x
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i32(
     <vscale x 1 x i16> undef,
-    <vscale x 1 x i16>* %0,
+    ptr %0,
     <vscale x 1 x i32> %1,
     iXLen %2)
 
@@ -269,13 +269,13 @@ entry:
 
 declare <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32(
   <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
+  ptr,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -284,7 +284,7 @@ define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vsca
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
+    ptr %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -294,11 +294,11 @@ entry:
 
 declare <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i32(
   <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
+  ptr,
   <vscale x 2 x i32>,
   iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16>* %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -308,7 +308,7 @@ define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i32(<vscale x
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i32(
     <vscale x 2 x i16> undef,
-    <vscale x 2 x i16>* %0,
+    ptr %0,
     <vscale x 2 x i32> %1,
     iXLen %2)
 
@@ -317,13 +317,13 @@ entry:
 
 declare <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32(
   <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
+  ptr,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -332,7 +332,7 @@ define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vsca
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
+    ptr %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -342,11 +342,11 @@ entry:
 
 declare <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i32(
   <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
+  ptr,
   <vscale x 4 x i32>,
   iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16>* %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -356,7 +356,7 @@ define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i32(<vscale x
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i32(
     <vscale x 4 x i16> undef,
-    <vscale x 4 x i16>* %0,
+    ptr %0,
     <vscale x 4 x i32> %1,
     iXLen %2)
 
@@ -365,13 +365,13 @@ entry:
 
 declare <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32(
   <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
+  ptr,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -380,7 +380,7 @@ define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vsca
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
+    ptr %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -390,11 +390,11 @@ entry:
 
 declare <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i32(
   <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
+  ptr,
   <vscale x 8 x i32>,
   iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16>* %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -404,7 +404,7 @@ define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i32(<vscale x
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i32(
     <vscale x 8 x i16> undef,
-    <vscale x 8 x i16>* %0,
+    ptr %0,
     <vscale x 8 x i32> %1,
     iXLen %2)
 
@@ -413,13 +413,13 @@ entry:
 
 declare <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32(
   <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
+  ptr,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -428,7 +428,7 @@ define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vsca
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
+    ptr %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)
@@ -438,11 +438,11 @@ entry:
 
 declare <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i32(
   <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
+  ptr,
   <vscale x 16 x i32>,
   iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16>* %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -452,7 +452,7 @@ define <vscale x 16 x i16> @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i32(<vscal
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i32(
     <vscale x 16 x i16> undef,
-    <vscale x 16 x i16>* %0,
+    ptr %0,
     <vscale x 16 x i32> %1,
     iXLen %2)
 
@@ -461,13 +461,13 @@ entry:
 
 declare <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32(
   <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
+  ptr,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -476,7 +476,7 @@ define <vscale x 16 x i16> @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
+    ptr %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
     iXLen %4, iXLen 1)
@@ -486,11 +486,11 @@ entry:
 
 declare <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
+  ptr,
   <vscale x 1 x i32>,
   iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32>* %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -499,7 +499,7 @@ define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i32(<vscale x
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32(
     <vscale x 1 x i32> undef,
-    <vscale x 1 x i32>* %0,
+    ptr %0,
     <vscale x 1 x i32> %1,
     iXLen %2)
 
@@ -508,13 +508,13 @@ entry:
 
 declare <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
+  ptr,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -523,7 +523,7 @@ define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vsca
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
+    ptr %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -533,11 +533,11 @@ entry:
 
 declare <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
+  ptr,
   <vscale x 2 x i32>,
   iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32>* %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -546,7 +546,7 @@ define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i32(<vscale x
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i32(
     <vscale x 2 x i32> undef,
-    <vscale x 2 x i32>* %0,
+    ptr %0,
     <vscale x 2 x i32> %1,
     iXLen %2)
 
@@ -555,13 +555,13 @@ entry:
 
 declare <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
+  ptr,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -570,7 +570,7 @@ define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vsca
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
+    ptr %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -580,11 +580,11 @@ entry:
 
 declare <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   <vscale x 4 x i32>,
   iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32>* %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -593,7 +593,7 @@ define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i32(<vscale x
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i32(
     <vscale x 4 x i32> undef,
-    <vscale x 4 x i32>* %0,
+    ptr %0,
     <vscale x 4 x i32> %1,
     iXLen %2)
 
@@ -602,13 +602,13 @@ entry:
 
 declare <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -617,7 +617,7 @@ define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vsca
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
+    ptr %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -627,11 +627,11 @@ entry:
 
 declare <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
+  ptr,
   <vscale x 8 x i32>,
   iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32>* %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -640,7 +640,7 @@ define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i32(<vscale x
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i32(
     <vscale x 8 x i32> undef,
-    <vscale x 8 x i32>* %0,
+    ptr %0,
     <vscale x 8 x i32> %1,
     iXLen %2)
 
@@ -649,13 +649,13 @@ entry:
 
 declare <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
+  ptr,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -664,7 +664,7 @@ define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vsca
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
+    ptr %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)
@@ -674,11 +674,11 @@ entry:
 
 declare <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
+  ptr,
   <vscale x 16 x i32>,
   iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32>* %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -687,7 +687,7 @@ define <vscale x 16 x i32> @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i32(<vscal
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i32(
     <vscale x 16 x i32> undef,
-    <vscale x 16 x i32>* %0,
+    ptr %0,
     <vscale x 16 x i32> %1,
     iXLen %2)
 
@@ -696,13 +696,13 @@ entry:
 
 declare <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
+  ptr,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -711,7 +711,7 @@ define <vscale x 16 x i32> @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
+    ptr %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
     iXLen %4, iXLen 1)
@@ -721,11 +721,11 @@ entry:
 
 declare <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i32(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   <vscale x 1 x i32>,
   iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64>* %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -735,7 +735,7 @@ define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i32(<vscale x
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i32(
     <vscale x 1 x i64> undef,
-    <vscale x 1 x i64>* %0,
+    ptr %0,
     <vscale x 1 x i32> %1,
     iXLen %2)
 
@@ -744,13 +744,13 @@ entry:
 
 declare <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -759,7 +759,7 @@ define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vsca
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
+    ptr %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -769,11 +769,11 @@ entry:
 
 declare <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i32(
   <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
+  ptr,
   <vscale x 2 x i32>,
   iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64>* %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -783,7 +783,7 @@ define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i32(<vscale x
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i32(
     <vscale x 2 x i64> undef,
-    <vscale x 2 x i64>* %0,
+    ptr %0,
     <vscale x 2 x i32> %1,
     iXLen %2)
 
@@ -792,13 +792,13 @@ entry:
 
 declare <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32(
   <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
+  ptr,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -807,7 +807,7 @@ define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vsca
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
+    ptr %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -817,11 +817,11 @@ entry:
 
 declare <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i32(
   <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
+  ptr,
   <vscale x 4 x i32>,
   iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64>* %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -831,7 +831,7 @@ define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i32(<vscale x
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i32(
     <vscale x 4 x i64> undef,
-    <vscale x 4 x i64>* %0,
+    ptr %0,
     <vscale x 4 x i32> %1,
     iXLen %2)
 
@@ -840,13 +840,13 @@ entry:
 
 declare <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32(
   <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
+  ptr,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -855,7 +855,7 @@ define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vsca
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
+    ptr %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -865,11 +865,11 @@ entry:
 
 declare <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i32(
   <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
+  ptr,
   <vscale x 8 x i32>,
   iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64>* %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -879,7 +879,7 @@ define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i32(<vscale x
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i32(
     <vscale x 8 x i64> undef,
-    <vscale x 8 x i64>* %0,
+    ptr %0,
     <vscale x 8 x i32> %1,
     iXLen %2)
 
@@ -888,13 +888,13 @@ entry:
 
 declare <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32(
   <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
+  ptr,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -903,7 +903,7 @@ define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vsca
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
+    ptr %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)
@@ -913,11 +913,11 @@ entry:
 
 declare <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i32(
   <vscale x 1 x half>,
-  <vscale x 1 x half>*,
+  ptr,
   <vscale x 1 x i32>,
   iXLen);
 
-define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half>* %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -927,7 +927,7 @@ define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i32(<vscale x
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i32(
     <vscale x 1 x half> undef,
-    <vscale x 1 x half>* %0,
+    ptr %0,
     <vscale x 1 x i32> %1,
     iXLen %2)
 
@@ -936,13 +936,13 @@ entry:
 
 declare <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i32(
   <vscale x 1 x half>,
-  <vscale x 1 x half>*,
+  ptr,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -951,7 +951,7 @@ define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vsc
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i32(
     <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
+    ptr %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -961,11 +961,11 @@ entry:
 
 declare <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i32(
   <vscale x 2 x half>,
-  <vscale x 2 x half>*,
+  ptr,
   <vscale x 2 x i32>,
   iXLen);
 
-define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half>* %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -975,7 +975,7 @@ define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i32(<vscale x
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i32(
     <vscale x 2 x half> undef,
-    <vscale x 2 x half>* %0,
+    ptr %0,
     <vscale x 2 x i32> %1,
     iXLen %2)
 
@@ -984,13 +984,13 @@ entry:
 
 declare <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i32(
   <vscale x 2 x half>,
-  <vscale x 2 x half>*,
+  ptr,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -999,7 +999,7 @@ define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vsc
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i32(
     <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
+    ptr %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1009,11 +1009,11 @@ entry:
 
 declare <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i32(
   <vscale x 4 x half>,
-  <vscale x 4 x half>*,
+  ptr,
   <vscale x 4 x i32>,
   iXLen);
 
-define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half>* %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1023,7 +1023,7 @@ define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i32(<vscale x
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i32(
     <vscale x 4 x half> undef,
-    <vscale x 4 x half>* %0,
+    ptr %0,
     <vscale x 4 x i32> %1,
     iXLen %2)
 
@@ -1032,13 +1032,13 @@ entry:
 
 declare <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i32(
   <vscale x 4 x half>,
-  <vscale x 4 x half>*,
+  ptr,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1047,7 +1047,7 @@ define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vsc
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i32(
     <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
+    ptr %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1057,11 +1057,11 @@ entry:
 
 declare <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i32(
   <vscale x 8 x half>,
-  <vscale x 8 x half>*,
+  ptr,
   <vscale x 8 x i32>,
   iXLen);
 
-define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half>* %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1071,7 +1071,7 @@ define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i32(<vscale x
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i32(
     <vscale x 8 x half> undef,
-    <vscale x 8 x half>* %0,
+    ptr %0,
     <vscale x 8 x i32> %1,
     iXLen %2)
 
@@ -1080,13 +1080,13 @@ entry:
 
 declare <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i32(
   <vscale x 8 x half>,
-  <vscale x 8 x half>*,
+  ptr,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1095,7 +1095,7 @@ define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vsc
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i32(
     <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
+    ptr %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1105,11 +1105,11 @@ entry:
 
 declare <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i32(
   <vscale x 16 x half>,
-  <vscale x 16 x half>*,
+  ptr,
   <vscale x 16 x i32>,
   iXLen);
 
-define <vscale x 16 x half> @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half>* %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1119,7 +1119,7 @@ define <vscale x 16 x half> @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i32(<vsca
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i32(
     <vscale x 16 x half> undef,
-    <vscale x 16 x half>* %0,
+    ptr %0,
     <vscale x 16 x i32> %1,
     iXLen %2)
 
@@ -1128,13 +1128,13 @@ entry:
 
 declare <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i32(
   <vscale x 16 x half>,
-  <vscale x 16 x half>*,
+  ptr,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 16 x half> @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1143,7 +1143,7 @@ define <vscale x 16 x half> @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i32(
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i32(
     <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
+    ptr %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1153,11 +1153,11 @@ entry:
 
 declare <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i32(
   <vscale x 1 x float>,
-  <vscale x 1 x float>*,
+  ptr,
   <vscale x 1 x i32>,
   iXLen);
 
-define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float>* %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1166,7 +1166,7 @@ define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i32(<vscale
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i32(
     <vscale x 1 x float> undef,
-    <vscale x 1 x float>* %0,
+    ptr %0,
     <vscale x 1 x i32> %1,
     iXLen %2)
 
@@ -1175,13 +1175,13 @@ entry:
 
 declare <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32(
   <vscale x 1 x float>,
-  <vscale x 1 x float>*,
+  ptr,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -1190,7 +1190,7 @@ define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vs
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32(
     <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
+    ptr %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1200,11 +1200,11 @@ entry:
 
 declare <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i32(
   <vscale x 2 x float>,
-  <vscale x 2 x float>*,
+  ptr,
   <vscale x 2 x i32>,
   iXLen);
 
-define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float>* %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1213,7 +1213,7 @@ define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i32(<vscale
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i32(
     <vscale x 2 x float> undef,
-    <vscale x 2 x float>* %0,
+    ptr %0,
     <vscale x 2 x i32> %1,
     iXLen %2)
 
@@ -1222,13 +1222,13 @@ entry:
 
 declare <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32(
   <vscale x 2 x float>,
-  <vscale x 2 x float>*,
+  ptr,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -1237,7 +1237,7 @@ define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vs
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32(
     <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
+    ptr %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1247,11 +1247,11 @@ entry:
 
 declare <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i32(
   <vscale x 4 x float>,
-  <vscale x 4 x float>*,
+  ptr,
   <vscale x 4 x i32>,
   iXLen);
 
-define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float>* %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1260,7 +1260,7 @@ define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i32(<vscale
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i32(
     <vscale x 4 x float> undef,
-    <vscale x 4 x float>* %0,
+    ptr %0,
     <vscale x 4 x i32> %1,
     iXLen %2)
 
@@ -1269,13 +1269,13 @@ entry:
 
 declare <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32(
   <vscale x 4 x float>,
-  <vscale x 4 x float>*,
+  ptr,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -1284,7 +1284,7 @@ define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vs
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32(
     <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
+    ptr %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1294,11 +1294,11 @@ entry:
 
 declare <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i32(
   <vscale x 8 x float>,
-  <vscale x 8 x float>*,
+  ptr,
   <vscale x 8 x i32>,
   iXLen);
 
-define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float>* %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1307,7 +1307,7 @@ define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i32(<vscale
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i32(
     <vscale x 8 x float> undef,
-    <vscale x 8 x float>* %0,
+    ptr %0,
     <vscale x 8 x i32> %1,
     iXLen %2)
 
@@ -1316,13 +1316,13 @@ entry:
 
 declare <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32(
   <vscale x 8 x float>,
-  <vscale x 8 x float>*,
+  ptr,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -1331,7 +1331,7 @@ define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vs
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32(
     <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
+    ptr %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1341,11 +1341,11 @@ entry:
 
 declare <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i32(
   <vscale x 16 x float>,
-  <vscale x 16 x float>*,
+  ptr,
   <vscale x 16 x i32>,
   iXLen);
 
-define <vscale x 16 x float> @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float>* %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -1354,7 +1354,7 @@ define <vscale x 16 x float> @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i32(<vsc
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i32(
     <vscale x 16 x float> undef,
-    <vscale x 16 x float>* %0,
+    ptr %0,
     <vscale x 16 x i32> %1,
     iXLen %2)
 
@@ -1363,13 +1363,13 @@ entry:
 
 declare <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32(
   <vscale x 16 x float>,
-  <vscale x 16 x float>*,
+  ptr,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 16 x float> @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -1378,7 +1378,7 @@ define <vscale x 16 x float> @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i32
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32(
     <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
+    ptr %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1388,11 +1388,11 @@ entry:
 
 declare <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i32(
   <vscale x 1 x double>,
-  <vscale x 1 x double>*,
+  ptr,
   <vscale x 1 x i32>,
   iXLen);
 
-define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double>* %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -1402,7 +1402,7 @@ define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i32(<vscale
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i32(
     <vscale x 1 x double> undef,
-    <vscale x 1 x double>* %0,
+    ptr %0,
     <vscale x 1 x i32> %1,
     iXLen %2)
 
@@ -1411,13 +1411,13 @@ entry:
 
 declare <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32(
   <vscale x 1 x double>,
-  <vscale x 1 x double>*,
+  ptr,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -1426,7 +1426,7 @@ define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<v
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32(
     <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
+    ptr %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1436,11 +1436,11 @@ entry:
 
 declare <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i32(
   <vscale x 2 x double>,
-  <vscale x 2 x double>*,
+  ptr,
   <vscale x 2 x i32>,
   iXLen);
 
-define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double>* %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -1450,7 +1450,7 @@ define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i32(<vscale
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i32(
     <vscale x 2 x double> undef,
-    <vscale x 2 x double>* %0,
+    ptr %0,
     <vscale x 2 x i32> %1,
     iXLen %2)
 
@@ -1459,13 +1459,13 @@ entry:
 
 declare <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32(
   <vscale x 2 x double>,
-  <vscale x 2 x double>*,
+  ptr,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -1474,7 +1474,7 @@ define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<v
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32(
     <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
+    ptr %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1484,11 +1484,11 @@ entry:
 
 declare <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i32(
   <vscale x 4 x double>,
-  <vscale x 4 x double>*,
+  ptr,
   <vscale x 4 x i32>,
   iXLen);
 
-define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double>* %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -1498,7 +1498,7 @@ define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i32(<vscale
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i32(
     <vscale x 4 x double> undef,
-    <vscale x 4 x double>* %0,
+    ptr %0,
     <vscale x 4 x i32> %1,
     iXLen %2)
 
@@ -1507,13 +1507,13 @@ entry:
 
 declare <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32(
   <vscale x 4 x double>,
-  <vscale x 4 x double>*,
+  ptr,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -1522,7 +1522,7 @@ define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<v
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32(
     <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
+    ptr %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1532,11 +1532,11 @@ entry:
 
 declare <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i32(
   <vscale x 8 x double>,
-  <vscale x 8 x double>*,
+  ptr,
   <vscale x 8 x i32>,
   iXLen);
 
-define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double>* %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -1546,7 +1546,7 @@ define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i32(<vscale
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i32(
     <vscale x 8 x double> undef,
-    <vscale x 8 x double>* %0,
+    ptr %0,
     <vscale x 8 x i32> %1,
     iXLen %2)
 
@@ -1555,13 +1555,13 @@ entry:
 
 declare <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32(
   <vscale x 8 x double>,
-  <vscale x 8 x double>*,
+  ptr,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -1570,7 +1570,7 @@ define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<v
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32(
     <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
+    ptr %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1580,11 +1580,11 @@ entry:
 
 declare <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i16(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   <vscale x 1 x i16>,
   iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8>* %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -1594,7 +1594,7 @@ define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i16(
     <vscale x 1 x i8> undef,
-    <vscale x 1 x i8>* %0,
+    ptr %0,
     <vscale x 1 x i16> %1,
     iXLen %2)
 
@@ -1603,13 +1603,13 @@ entry:
 
 declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -1618,7 +1618,7 @@ define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
+    ptr %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1628,11 +1628,11 @@ entry:
 
 declare <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i16(
   <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
+  ptr,
   <vscale x 2 x i16>,
   iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8>* %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -1642,7 +1642,7 @@ define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i16(
     <vscale x 2 x i8> undef,
-    <vscale x 2 x i8>* %0,
+    ptr %0,
     <vscale x 2 x i16> %1,
     iXLen %2)
 
@@ -1651,13 +1651,13 @@ entry:
 
 declare <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16(
   <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
+  ptr,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -1666,7 +1666,7 @@ define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
+    ptr %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1676,11 +1676,11 @@ entry:
 
 declare <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i16(
   <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
+  ptr,
   <vscale x 4 x i16>,
   iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8>* %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -1690,7 +1690,7 @@ define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i16(
     <vscale x 4 x i8> undef,
-    <vscale x 4 x i8>* %0,
+    ptr %0,
     <vscale x 4 x i16> %1,
     iXLen %2)
 
@@ -1699,13 +1699,13 @@ entry:
 
 declare <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16(
   <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
+  ptr,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -1714,7 +1714,7 @@ define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
+    ptr %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1724,11 +1724,11 @@ entry:
 
 declare <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i16(
   <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
+  ptr,
   <vscale x 8 x i16>,
   iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8>* %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -1738,7 +1738,7 @@ define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i16(
     <vscale x 8 x i8> undef,
-    <vscale x 8 x i8>* %0,
+    ptr %0,
     <vscale x 8 x i16> %1,
     iXLen %2)
 
@@ -1747,13 +1747,13 @@ entry:
 
 declare <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16(
   <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
+  ptr,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -1762,7 +1762,7 @@ define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
+    ptr %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1772,11 +1772,11 @@ entry:
 
 declare <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i16(
   <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
+  ptr,
   <vscale x 16 x i16>,
   iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8>* %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -1786,7 +1786,7 @@ define <vscale x 16 x i8> @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i16(<vscale x
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i16(
     <vscale x 16 x i8> undef,
-    <vscale x 16 x i8>* %0,
+    ptr %0,
     <vscale x 16 x i16> %1,
     iXLen %2)
 
@@ -1795,13 +1795,13 @@ entry:
 
 declare <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16(
   <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
+  ptr,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -1810,7 +1810,7 @@ define <vscale x 16 x i8> @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vsc
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
+    ptr %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1820,11 +1820,11 @@ entry:
 
 declare <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i16(
   <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
+  ptr,
   <vscale x 32 x i16>,
   iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8>* %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i16(ptr %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -1834,7 +1834,7 @@ define <vscale x 32 x i8> @intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i16(<vscale x
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i16(
     <vscale x 32 x i8> undef,
-    <vscale x 32 x i8>* %0,
+    ptr %0,
     <vscale x 32 x i16> %1,
     iXLen %2)
 
@@ -1843,13 +1843,13 @@ entry:
 
 declare <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16(
   <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
+  ptr,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -1858,7 +1858,7 @@ define <vscale x 32 x i8> @intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vsc
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
-    <vscale x 32 x i8>* %1,
+    ptr %1,
     <vscale x 32 x i16> %2,
     <vscale x 32 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1868,11 +1868,11 @@ entry:
 
 declare <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
+  ptr,
   <vscale x 1 x i16>,
   iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16>* %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1881,7 +1881,7 @@ define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i16(<vscale x
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i16(
     <vscale x 1 x i16> undef,
-    <vscale x 1 x i16>* %0,
+    ptr %0,
     <vscale x 1 x i16> %1,
     iXLen %2)
 
@@ -1890,13 +1890,13 @@ entry:
 
 declare <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
+  ptr,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1905,7 +1905,7 @@ define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vsca
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
+    ptr %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1915,11 +1915,11 @@ entry:
 
 declare <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
+  ptr,
   <vscale x 2 x i16>,
   iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16>* %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1928,7 +1928,7 @@ define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i16(<vscale x
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i16(
     <vscale x 2 x i16> undef,
-    <vscale x 2 x i16>* %0,
+    ptr %0,
     <vscale x 2 x i16> %1,
     iXLen %2)
 
@@ -1937,13 +1937,13 @@ entry:
 
 declare <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
+  ptr,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1952,7 +1952,7 @@ define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vsca
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
+    ptr %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -1962,11 +1962,11 @@ entry:
 
 declare <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
+  ptr,
   <vscale x 4 x i16>,
   iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16>* %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1975,7 +1975,7 @@ define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i16(<vscale x
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i16(
     <vscale x 4 x i16> undef,
-    <vscale x 4 x i16>* %0,
+    ptr %0,
     <vscale x 4 x i16> %1,
     iXLen %2)
 
@@ -1984,13 +1984,13 @@ entry:
 
 declare <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
+  ptr,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1999,7 +1999,7 @@ define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vsca
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
+    ptr %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -2009,11 +2009,11 @@ entry:
 
 declare <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
+  ptr,
   <vscale x 8 x i16>,
   iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16>* %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -2022,7 +2022,7 @@ define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i16(<vscale x
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i16(
     <vscale x 8 x i16> undef,
-    <vscale x 8 x i16>* %0,
+    ptr %0,
     <vscale x 8 x i16> %1,
     iXLen %2)
 
@@ -2031,13 +2031,13 @@ entry:
 
 declare <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
+  ptr,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -2046,7 +2046,7 @@ define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vsca
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
+    ptr %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)
@@ -2056,11 +2056,11 @@ entry:
 
 declare <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
+  ptr,
   <vscale x 16 x i16>,
   iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16>* %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -2069,7 +2069,7 @@ define <vscale x 16 x i16> @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i16(<vscal
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i16(
     <vscale x 16 x i16> undef,
-    <vscale x 16 x i16>* %0,
+    ptr %0,
     <vscale x 16 x i16> %1,
     iXLen %2)
 
@@ -2078,13 +2078,13 @@ entry:
 
 declare <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
+  ptr,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -2093,7 +2093,7 @@ define <vscale x 16 x i16> @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
+    ptr %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
     iXLen %4, iXLen 1)
@@ -2103,11 +2103,11 @@ entry:
 
 declare <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
+  ptr,
   <vscale x 32 x i16>,
   iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16>* %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i16(ptr %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -2116,7 +2116,7 @@ define <vscale x 32 x i16> @intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i16(<vscal
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i16(
     <vscale x 32 x i16> undef,
-    <vscale x 32 x i16>* %0,
+    ptr %0,
     <vscale x 32 x i16> %1,
     iXLen %2)
 
@@ -2125,13 +2125,13 @@ entry:
 
 declare <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
+  ptr,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -2140,7 +2140,7 @@ define <vscale x 32 x i16> @intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
-    <vscale x 32 x i16>* %1,
+    ptr %1,
     <vscale x 32 x i16> %2,
     <vscale x 32 x i1> %3,
     iXLen %4, iXLen 1)
@@ -2150,11 +2150,11 @@ entry:
 
 declare <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16(
   <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
+  ptr,
   <vscale x 1 x i16>,
   iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32>* %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -2164,7 +2164,7 @@ define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16(
     <vscale x 1 x i32> undef,
-    <vscale x 1 x i32>* %0,
+    ptr %0,
     <vscale x 1 x i16> %1,
     iXLen %2)
 
@@ -2173,13 +2173,13 @@ entry:
 
 declare <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16(
   <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
+  ptr,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -2188,7 +2188,7 @@ define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vsca
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
+    ptr %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -2198,11 +2198,11 @@ entry:
 
 declare <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i16(
   <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
+  ptr,
   <vscale x 2 x i16>,
   iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32>* %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -2212,7 +2212,7 @@ define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i16(<vscale x
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i16(
     <vscale x 2 x i32> undef,
-    <vscale x 2 x i32>* %0,
+    ptr %0,
     <vscale x 2 x i16> %1,
     iXLen %2)
 
@@ -2221,13 +2221,13 @@ entry:
 
 declare <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16(
   <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
+  ptr,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -2236,7 +2236,7 @@ define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vsca
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
+    ptr %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -2246,11 +2246,11 @@ entry:
 
 declare <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i16(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   <vscale x 4 x i16>,
   iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32>* %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -2260,7 +2260,7 @@ define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i16(<vscale x
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i16(
     <vscale x 4 x i32> undef,
-    <vscale x 4 x i32>* %0,
+    ptr %0,
     <vscale x 4 x i16> %1,
     iXLen %2)
 
@@ -2269,13 +2269,13 @@ entry:
 
 declare <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -2284,7 +2284,7 @@ define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vsca
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
+    ptr %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -2294,11 +2294,11 @@ entry:
 
 declare <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i16(
   <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
+  ptr,
   <vscale x 8 x i16>,
   iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32>* %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -2308,7 +2308,7 @@ define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i16(<vscale x
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i16(
     <vscale x 8 x i32> undef,
-    <vscale x 8 x i32>* %0,
+    ptr %0,
     <vscale x 8 x i16> %1,
     iXLen %2)
 
@@ -2317,13 +2317,13 @@ entry:
 
 declare <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16(
   <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
+  ptr,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -2332,7 +2332,7 @@ define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vsca
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
+    ptr %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)
@@ -2342,11 +2342,11 @@ entry:
 
 declare <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i16(
   <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
+  ptr,
   <vscale x 16 x i16>,
   iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32>* %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -2356,7 +2356,7 @@ define <vscale x 16 x i32> @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i16(<vscal
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i16(
     <vscale x 16 x i32> undef,
-    <vscale x 16 x i32>* %0,
+    ptr %0,
     <vscale x 16 x i16> %1,
     iXLen %2)
 
@@ -2365,13 +2365,13 @@ entry:
 
 declare <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16(
   <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
+  ptr,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -2380,7 +2380,7 @@ define <vscale x 16 x i32> @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
+    ptr %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
     iXLen %4, iXLen 1)
@@ -2390,11 +2390,11 @@ entry:
 
 declare <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i16(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   <vscale x 1 x i16>,
   iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64>* %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -2404,7 +2404,7 @@ define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i16(<vscale x
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i16(
     <vscale x 1 x i64> undef,
-    <vscale x 1 x i64>* %0,
+    ptr %0,
     <vscale x 1 x i16> %1,
     iXLen %2)
 
@@ -2413,13 +2413,13 @@ entry:
 
 declare <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -2428,7 +2428,7 @@ define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vsca
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
+    ptr %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -2438,11 +2438,11 @@ entry:
 
 declare <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i16(
   <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
+  ptr,
   <vscale x 2 x i16>,
   iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64>* %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -2452,7 +2452,7 @@ define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i16(<vscale x
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i16(
     <vscale x 2 x i64> undef,
-    <vscale x 2 x i64>* %0,
+    ptr %0,
     <vscale x 2 x i16> %1,
     iXLen %2)
 
@@ -2461,13 +2461,13 @@ entry:
 
 declare <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16(
   <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
+  ptr,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -2476,7 +2476,7 @@ define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vsca
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
+    ptr %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -2486,11 +2486,11 @@ entry:
 
 declare <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i16(
   <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
+  ptr,
   <vscale x 4 x i16>,
   iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64>* %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -2500,7 +2500,7 @@ define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i16(<vscale x
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i16(
     <vscale x 4 x i64> undef,
-    <vscale x 4 x i64>* %0,
+    ptr %0,
     <vscale x 4 x i16> %1,
     iXLen %2)
 
@@ -2509,13 +2509,13 @@ entry:
 
 declare <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16(
   <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
+  ptr,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -2524,7 +2524,7 @@ define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vsca
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
+    ptr %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -2534,11 +2534,11 @@ entry:
 
 declare <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i16(
   <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
+  ptr,
   <vscale x 8 x i16>,
   iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64>* %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -2548,7 +2548,7 @@ define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i16(<vscale x
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i16(
     <vscale x 8 x i64> undef,
-    <vscale x 8 x i64>* %0,
+    ptr %0,
     <vscale x 8 x i16> %1,
     iXLen %2)
 
@@ -2557,13 +2557,13 @@ entry:
 
 declare <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16(
   <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
+  ptr,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -2572,7 +2572,7 @@ define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vsca
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
+    ptr %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)
@@ -2582,11 +2582,11 @@ entry:
 
 declare <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i16(
   <vscale x 1 x half>,
-  <vscale x 1 x half>*,
+  ptr,
   <vscale x 1 x i16>,
   iXLen);
 
-define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half>* %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -2595,7 +2595,7 @@ define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i16(<vscale x
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i16(
     <vscale x 1 x half> undef,
-    <vscale x 1 x half>* %0,
+    ptr %0,
     <vscale x 1 x i16> %1,
     iXLen %2)
 
@@ -2604,13 +2604,13 @@ entry:
 
 declare <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i16(
   <vscale x 1 x half>,
-  <vscale x 1 x half>*,
+  ptr,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -2619,7 +2619,7 @@ define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vsc
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i16(
     <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
+    ptr %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -2629,11 +2629,11 @@ entry:
 
 declare <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i16(
   <vscale x 2 x half>,
-  <vscale x 2 x half>*,
+  ptr,
   <vscale x 2 x i16>,
   iXLen);
 
-define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half>* %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -2642,7 +2642,7 @@ define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i16(<vscale x
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i16(
     <vscale x 2 x half> undef,
-    <vscale x 2 x half>* %0,
+    ptr %0,
     <vscale x 2 x i16> %1,
     iXLen %2)
 
@@ -2651,13 +2651,13 @@ entry:
 
 declare <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i16(
   <vscale x 2 x half>,
-  <vscale x 2 x half>*,
+  ptr,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -2666,7 +2666,7 @@ define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vsc
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i16(
     <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
+    ptr %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -2676,11 +2676,11 @@ entry:
 
 declare <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i16(
   <vscale x 4 x half>,
-  <vscale x 4 x half>*,
+  ptr,
   <vscale x 4 x i16>,
   iXLen);
 
-define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half>* %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -2689,7 +2689,7 @@ define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i16(<vscale x
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i16(
     <vscale x 4 x half> undef,
-    <vscale x 4 x half>* %0,
+    ptr %0,
     <vscale x 4 x i16> %1,
     iXLen %2)
 
@@ -2698,13 +2698,13 @@ entry:
 
 declare <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i16(
   <vscale x 4 x half>,
-  <vscale x 4 x half>*,
+  ptr,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -2713,7 +2713,7 @@ define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vsc
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i16(
     <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
+    ptr %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -2723,11 +2723,11 @@ entry:
 
 declare <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i16(
   <vscale x 8 x half>,
-  <vscale x 8 x half>*,
+  ptr,
   <vscale x 8 x i16>,
   iXLen);
 
-define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half>* %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -2736,7 +2736,7 @@ define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i16(<vscale x
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i16(
     <vscale x 8 x half> undef,
-    <vscale x 8 x half>* %0,
+    ptr %0,
     <vscale x 8 x i16> %1,
     iXLen %2)
 
@@ -2745,13 +2745,13 @@ entry:
 
 declare <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i16(
   <vscale x 8 x half>,
-  <vscale x 8 x half>*,
+  ptr,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -2760,7 +2760,7 @@ define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vsc
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i16(
     <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
+    ptr %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)
@@ -2770,11 +2770,11 @@ entry:
 
 declare <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i16(
   <vscale x 16 x half>,
-  <vscale x 16 x half>*,
+  ptr,
   <vscale x 16 x i16>,
   iXLen);
 
-define <vscale x 16 x half> @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half>* %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -2783,7 +2783,7 @@ define <vscale x 16 x half> @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i16(<vsca
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i16(
     <vscale x 16 x half> undef,
-    <vscale x 16 x half>* %0,
+    ptr %0,
     <vscale x 16 x i16> %1,
     iXLen %2)
 
@@ -2792,13 +2792,13 @@ entry:
 
 declare <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i16(
   <vscale x 16 x half>,
-  <vscale x 16 x half>*,
+  ptr,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 16 x half> @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -2807,7 +2807,7 @@ define <vscale x 16 x half> @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i16(
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i16(
     <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
+    ptr %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
     iXLen %4, iXLen 1)
@@ -2817,11 +2817,11 @@ entry:
 
 declare <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i16(
   <vscale x 32 x half>,
-  <vscale x 32 x half>*,
+  ptr,
   <vscale x 32 x i16>,
   iXLen);
 
-define <vscale x 32 x half> @intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half>* %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i16(ptr %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -2830,7 +2830,7 @@ define <vscale x 32 x half> @intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i16(<vsca
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i16(
     <vscale x 32 x half> undef,
-    <vscale x 32 x half>* %0,
+    ptr %0,
     <vscale x 32 x i16> %1,
     iXLen %2)
 
@@ -2839,13 +2839,13 @@ entry:
 
 declare <vscale x 32 x half> @llvm.riscv.vluxei.mask.nxv32f16.nxv32i16(
   <vscale x 32 x half>,
-  <vscale x 32 x half>*,
+  ptr,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 32 x half> @intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+define <vscale x 32 x half> @intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -2854,7 +2854,7 @@ define <vscale x 32 x half> @intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i16(
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vluxei.mask.nxv32f16.nxv32i16(
     <vscale x 32 x half> %0,
-    <vscale x 32 x half>* %1,
+    ptr %1,
     <vscale x 32 x i16> %2,
     <vscale x 32 x i1> %3,
     iXLen %4, iXLen 1)
@@ -2864,11 +2864,11 @@ entry:
 
 declare <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i16(
   <vscale x 1 x float>,
-  <vscale x 1 x float>*,
+  ptr,
   <vscale x 1 x i16>,
   iXLen);
 
-define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float>* %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -2878,7 +2878,7 @@ define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i16(<vscale
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i16(
     <vscale x 1 x float> undef,
-    <vscale x 1 x float>* %0,
+    ptr %0,
     <vscale x 1 x i16> %1,
     iXLen %2)
 
@@ -2887,13 +2887,13 @@ entry:
 
 declare <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16(
   <vscale x 1 x float>,
-  <vscale x 1 x float>*,
+  ptr,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -2902,7 +2902,7 @@ define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vs
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16(
     <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
+    ptr %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -2912,11 +2912,11 @@ entry:
 
 declare <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i16(
   <vscale x 2 x float>,
-  <vscale x 2 x float>*,
+  ptr,
   <vscale x 2 x i16>,
   iXLen);
 
-define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float>* %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -2926,7 +2926,7 @@ define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i16(<vscale
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i16(
     <vscale x 2 x float> undef,
-    <vscale x 2 x float>* %0,
+    ptr %0,
     <vscale x 2 x i16> %1,
     iXLen %2)
 
@@ -2935,13 +2935,13 @@ entry:
 
 declare <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16(
   <vscale x 2 x float>,
-  <vscale x 2 x float>*,
+  ptr,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -2950,7 +2950,7 @@ define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vs
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16(
     <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
+    ptr %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -2960,11 +2960,11 @@ entry:
 
 declare <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i16(
   <vscale x 4 x float>,
-  <vscale x 4 x float>*,
+  ptr,
   <vscale x 4 x i16>,
   iXLen);
 
-define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float>* %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -2974,7 +2974,7 @@ define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i16(<vscale
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i16(
     <vscale x 4 x float> undef,
-    <vscale x 4 x float>* %0,
+    ptr %0,
     <vscale x 4 x i16> %1,
     iXLen %2)
 
@@ -2983,13 +2983,13 @@ entry:
 
 declare <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16(
   <vscale x 4 x float>,
-  <vscale x 4 x float>*,
+  ptr,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -2998,7 +2998,7 @@ define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vs
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16(
     <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
+    ptr %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -3008,11 +3008,11 @@ entry:
 
 declare <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i16(
   <vscale x 8 x float>,
-  <vscale x 8 x float>*,
+  ptr,
   <vscale x 8 x i16>,
   iXLen);
 
-define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float>* %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -3022,7 +3022,7 @@ define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i16(<vscale
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i16(
     <vscale x 8 x float> undef,
-    <vscale x 8 x float>* %0,
+    ptr %0,
     <vscale x 8 x i16> %1,
     iXLen %2)
 
@@ -3031,13 +3031,13 @@ entry:
 
 declare <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16(
   <vscale x 8 x float>,
-  <vscale x 8 x float>*,
+  ptr,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -3046,7 +3046,7 @@ define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vs
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16(
     <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
+    ptr %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)
@@ -3056,11 +3056,11 @@ entry:
 
 declare <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i16(
   <vscale x 16 x float>,
-  <vscale x 16 x float>*,
+  ptr,
   <vscale x 16 x i16>,
   iXLen);
 
-define <vscale x 16 x float> @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float>* %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -3070,7 +3070,7 @@ define <vscale x 16 x float> @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i16(<vsc
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i16(
     <vscale x 16 x float> undef,
-    <vscale x 16 x float>* %0,
+    ptr %0,
     <vscale x 16 x i16> %1,
     iXLen %2)
 
@@ -3079,13 +3079,13 @@ entry:
 
 declare <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16(
   <vscale x 16 x float>,
-  <vscale x 16 x float>*,
+  ptr,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 16 x float> @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -3094,7 +3094,7 @@ define <vscale x 16 x float> @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i16
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16(
     <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
+    ptr %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
     iXLen %4, iXLen 1)
@@ -3104,11 +3104,11 @@ entry:
 
 declare <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i16(
   <vscale x 1 x double>,
-  <vscale x 1 x double>*,
+  ptr,
   <vscale x 1 x i16>,
   iXLen);
 
-define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double>* %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -3118,7 +3118,7 @@ define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i16(<vscale
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i16(
     <vscale x 1 x double> undef,
-    <vscale x 1 x double>* %0,
+    ptr %0,
     <vscale x 1 x i16> %1,
     iXLen %2)
 
@@ -3127,13 +3127,13 @@ entry:
 
 declare <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16(
   <vscale x 1 x double>,
-  <vscale x 1 x double>*,
+  ptr,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -3142,7 +3142,7 @@ define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<v
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16(
     <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
+    ptr %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -3152,11 +3152,11 @@ entry:
 
 declare <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i16(
   <vscale x 2 x double>,
-  <vscale x 2 x double>*,
+  ptr,
   <vscale x 2 x i16>,
   iXLen);
 
-define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double>* %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -3166,7 +3166,7 @@ define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i16(<vscale
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i16(
     <vscale x 2 x double> undef,
-    <vscale x 2 x double>* %0,
+    ptr %0,
     <vscale x 2 x i16> %1,
     iXLen %2)
 
@@ -3175,13 +3175,13 @@ entry:
 
 declare <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16(
   <vscale x 2 x double>,
-  <vscale x 2 x double>*,
+  ptr,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -3190,7 +3190,7 @@ define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<v
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16(
     <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
+    ptr %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -3200,11 +3200,11 @@ entry:
 
 declare <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i16(
   <vscale x 4 x double>,
-  <vscale x 4 x double>*,
+  ptr,
   <vscale x 4 x i16>,
   iXLen);
 
-define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double>* %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -3214,7 +3214,7 @@ define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i16(<vscale
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i16(
     <vscale x 4 x double> undef,
-    <vscale x 4 x double>* %0,
+    ptr %0,
     <vscale x 4 x i16> %1,
     iXLen %2)
 
@@ -3223,13 +3223,13 @@ entry:
 
 declare <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16(
   <vscale x 4 x double>,
-  <vscale x 4 x double>*,
+  ptr,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -3238,7 +3238,7 @@ define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<v
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16(
     <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
+    ptr %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -3248,11 +3248,11 @@ entry:
 
 declare <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i16(
   <vscale x 8 x double>,
-  <vscale x 8 x double>*,
+  ptr,
   <vscale x 8 x i16>,
   iXLen);
 
-define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double>* %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -3262,7 +3262,7 @@ define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i16(<vscale
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i16(
     <vscale x 8 x double> undef,
-    <vscale x 8 x double>* %0,
+    ptr %0,
     <vscale x 8 x i16> %1,
     iXLen %2)
 
@@ -3271,13 +3271,13 @@ entry:
 
 declare <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16(
   <vscale x 8 x double>,
-  <vscale x 8 x double>*,
+  ptr,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -3286,7 +3286,7 @@ define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<v
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16(
     <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
+    ptr %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)
@@ -3296,11 +3296,11 @@ entry:
 
 declare <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   <vscale x 1 x i8>,
   iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -3309,7 +3309,7 @@ define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i8(
     <vscale x 1 x i8> undef,
-    <vscale x 1 x i8>* %0,
+    ptr %0,
     <vscale x 1 x i8> %1,
     iXLen %2)
 
@@ -3318,13 +3318,13 @@ entry:
 
 declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -3333,7 +3333,7 @@ define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
+    ptr %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -3343,11 +3343,11 @@ entry:
 
 declare <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
+  ptr,
   <vscale x 2 x i8>,
   iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -3356,7 +3356,7 @@ define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i8(
     <vscale x 2 x i8> undef,
-    <vscale x 2 x i8>* %0,
+    ptr %0,
     <vscale x 2 x i8> %1,
     iXLen %2)
 
@@ -3365,13 +3365,13 @@ entry:
 
 declare <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
+  ptr,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -3380,7 +3380,7 @@ define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
+    ptr %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -3390,11 +3390,11 @@ entry:
 
 declare <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
+  ptr,
   <vscale x 4 x i8>,
   iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -3403,7 +3403,7 @@ define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i8(
     <vscale x 4 x i8> undef,
-    <vscale x 4 x i8>* %0,
+    ptr %0,
     <vscale x 4 x i8> %1,
     iXLen %2)
 
@@ -3412,13 +3412,13 @@ entry:
 
 declare <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
+  ptr,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -3427,7 +3427,7 @@ define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
+    ptr %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -3437,11 +3437,11 @@ entry:
 
 declare <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
+  ptr,
   <vscale x 8 x i8>,
   iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -3450,7 +3450,7 @@ define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i8(
     <vscale x 8 x i8> undef,
-    <vscale x 8 x i8>* %0,
+    ptr %0,
     <vscale x 8 x i8> %1,
     iXLen %2)
 
@@ -3459,13 +3459,13 @@ entry:
 
 declare <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
+  ptr,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -3474,7 +3474,7 @@ define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
+    ptr %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)
@@ -3484,11 +3484,11 @@ entry:
 
 declare <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
+  ptr,
   <vscale x 16 x i8>,
   iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8>* %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -3497,7 +3497,7 @@ define <vscale x 16 x i8> @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i8(<vscale x
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i8(
     <vscale x 16 x i8> undef,
-    <vscale x 16 x i8>* %0,
+    ptr %0,
     <vscale x 16 x i8> %1,
     iXLen %2)
 
@@ -3506,13 +3506,13 @@ entry:
 
 declare <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
+  ptr,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -3521,7 +3521,7 @@ define <vscale x 16 x i8> @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vsca
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
+    ptr %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
     iXLen %4, iXLen 1)
@@ -3531,11 +3531,11 @@ entry:
 
 declare <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
+  ptr,
   <vscale x 32 x i8>,
   iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8>* %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i8(ptr %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -3544,7 +3544,7 @@ define <vscale x 32 x i8> @intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i8(<vscale x
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i8(
     <vscale x 32 x i8> undef,
-    <vscale x 32 x i8>* %0,
+    ptr %0,
     <vscale x 32 x i8> %1,
     iXLen %2)
 
@@ -3553,13 +3553,13 @@ entry:
 
 declare <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
+  ptr,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -3568,7 +3568,7 @@ define <vscale x 32 x i8> @intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vsca
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
-    <vscale x 32 x i8>* %1,
+    ptr %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
     iXLen %4, iXLen 1)
@@ -3578,11 +3578,11 @@ entry:
 
 declare <vscale x 64 x i8> @llvm.riscv.vluxei.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
-  <vscale x 64 x i8>*,
+  ptr,
   <vscale x 64 x i8>,
   iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vluxei_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8>* %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vluxei_v_nxv64i8_nxv64i8_nxv64i8(ptr %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
@@ -3591,7 +3591,7 @@ define <vscale x 64 x i8> @intrinsic_vluxei_v_nxv64i8_nxv64i8_nxv64i8(<vscale x
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vluxei.nxv64i8.nxv64i8(
     <vscale x 64 x i8> undef,
-    <vscale x 64 x i8>* %0,
+    ptr %0,
     <vscale x 64 x i8> %1,
     iXLen %2)
 
@@ -3600,13 +3600,13 @@ entry:
 
 declare <vscale x 64 x i8> @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
-  <vscale x 64 x i8>*,
+  ptr,
   <vscale x 64 x i8>,
   <vscale x 64 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vluxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vluxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
@@ -3615,7 +3615,7 @@ define <vscale x 64 x i8> @intrinsic_vluxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vsca
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
-    <vscale x 64 x i8>* %1,
+    ptr %1,
     <vscale x 64 x i8> %2,
     <vscale x 64 x i1> %3,
     iXLen %4, iXLen 1)
@@ -3625,11 +3625,11 @@ entry:
 
 declare <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
+  ptr,
   <vscale x 1 x i8>,
   iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16>* %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -3639,7 +3639,7 @@ define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i8(
     <vscale x 1 x i16> undef,
-    <vscale x 1 x i16>* %0,
+    ptr %0,
     <vscale x 1 x i8> %1,
     iXLen %2)
 
@@ -3648,13 +3648,13 @@ entry:
 
 declare <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
+  ptr,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -3663,7 +3663,7 @@ define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscal
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
+    ptr %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -3673,11 +3673,11 @@ entry:
 
 declare <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i8(
   <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
+  ptr,
   <vscale x 2 x i8>,
   iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16>* %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -3687,7 +3687,7 @@ define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i8(
     <vscale x 2 x i16> undef,
-    <vscale x 2 x i16>* %0,
+    ptr %0,
     <vscale x 2 x i8> %1,
     iXLen %2)
 
@@ -3696,13 +3696,13 @@ entry:
 
 declare <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8(
   <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
+  ptr,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -3711,7 +3711,7 @@ define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscal
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
+    ptr %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -3721,11 +3721,11 @@ entry:
 
 declare <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i8(
   <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
+  ptr,
   <vscale x 4 x i8>,
   iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16>* %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -3735,7 +3735,7 @@ define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i8(
     <vscale x 4 x i16> undef,
-    <vscale x 4 x i16>* %0,
+    ptr %0,
     <vscale x 4 x i8> %1,
     iXLen %2)
 
@@ -3744,13 +3744,13 @@ entry:
 
 declare <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8(
   <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
+  ptr,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -3759,7 +3759,7 @@ define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscal
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
+    ptr %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -3769,11 +3769,11 @@ entry:
 
 declare <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i8(
   <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
+  ptr,
   <vscale x 8 x i8>,
   iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16>* %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -3783,7 +3783,7 @@ define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i8(
     <vscale x 8 x i16> undef,
-    <vscale x 8 x i16>* %0,
+    ptr %0,
     <vscale x 8 x i8> %1,
     iXLen %2)
 
@@ -3792,13 +3792,13 @@ entry:
 
 declare <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8(
   <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
+  ptr,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -3807,7 +3807,7 @@ define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscal
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
+    ptr %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)
@@ -3817,11 +3817,11 @@ entry:
 
 declare <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i8(
   <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
+  ptr,
   <vscale x 16 x i8>,
   iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16>* %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -3831,7 +3831,7 @@ define <vscale x 16 x i16> @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i8(<vscale
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i8(
     <vscale x 16 x i16> undef,
-    <vscale x 16 x i16>* %0,
+    ptr %0,
     <vscale x 16 x i8> %1,
     iXLen %2)
 
@@ -3840,13 +3840,13 @@ entry:
 
 declare <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8(
   <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
+  ptr,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -3855,7 +3855,7 @@ define <vscale x 16 x i16> @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<v
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
+    ptr %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
     iXLen %4, iXLen 1)
@@ -3865,11 +3865,11 @@ entry:
 
 declare <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i8(
   <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
+  ptr,
   <vscale x 32 x i8>,
   iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16>* %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i8(ptr %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -3879,7 +3879,7 @@ define <vscale x 32 x i16> @intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i8(<vscale
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i8(
     <vscale x 32 x i16> undef,
-    <vscale x 32 x i16>* %0,
+    ptr %0,
     <vscale x 32 x i8> %1,
     iXLen %2)
 
@@ -3888,13 +3888,13 @@ entry:
 
 declare <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8(
   <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
+  ptr,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -3903,7 +3903,7 @@ define <vscale x 32 x i16> @intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<v
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
-    <vscale x 32 x i16>* %1,
+    ptr %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
     iXLen %4, iXLen 1)
@@ -3913,11 +3913,11 @@ entry:
 
 declare <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i8(
   <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
+  ptr,
   <vscale x 1 x i8>,
   iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32>* %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -3927,7 +3927,7 @@ define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i8(
     <vscale x 1 x i32> undef,
-    <vscale x 1 x i32>* %0,
+    ptr %0,
     <vscale x 1 x i8> %1,
     iXLen %2)
 
@@ -3936,13 +3936,13 @@ entry:
 
 declare <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8(
   <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
+  ptr,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -3951,7 +3951,7 @@ define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscal
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
+    ptr %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -3961,11 +3961,11 @@ entry:
 
 declare <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i8(
   <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
+  ptr,
   <vscale x 2 x i8>,
   iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32>* %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -3975,7 +3975,7 @@ define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i8(
     <vscale x 2 x i32> undef,
-    <vscale x 2 x i32>* %0,
+    ptr %0,
     <vscale x 2 x i8> %1,
     iXLen %2)
 
@@ -3984,13 +3984,13 @@ entry:
 
 declare <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8(
   <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
+  ptr,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -3999,7 +3999,7 @@ define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscal
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
+    ptr %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -4009,11 +4009,11 @@ entry:
 
 declare <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i8(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   <vscale x 4 x i8>,
   iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32>* %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -4023,7 +4023,7 @@ define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i8(
     <vscale x 4 x i32> undef,
-    <vscale x 4 x i32>* %0,
+    ptr %0,
     <vscale x 4 x i8> %1,
     iXLen %2)
 
@@ -4032,13 +4032,13 @@ entry:
 
 declare <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -4047,7 +4047,7 @@ define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscal
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
+    ptr %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -4057,11 +4057,11 @@ entry:
 
 declare <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i8(
   <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
+  ptr,
   <vscale x 8 x i8>,
   iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32>* %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -4071,7 +4071,7 @@ define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i8(
     <vscale x 8 x i32> undef,
-    <vscale x 8 x i32>* %0,
+    ptr %0,
     <vscale x 8 x i8> %1,
     iXLen %2)
 
@@ -4080,13 +4080,13 @@ entry:
 
 declare <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8(
   <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
+  ptr,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -4095,7 +4095,7 @@ define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscal
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
+    ptr %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)
@@ -4105,11 +4105,11 @@ entry:
 
 declare <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i8(
   <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
+  ptr,
   <vscale x 16 x i8>,
   iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32>* %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -4119,7 +4119,7 @@ define <vscale x 16 x i32> @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i8(<vscale
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i8(
     <vscale x 16 x i32> undef,
-    <vscale x 16 x i32>* %0,
+    ptr %0,
     <vscale x 16 x i8> %1,
     iXLen %2)
 
@@ -4128,13 +4128,13 @@ entry:
 
 declare <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8(
   <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
+  ptr,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -4143,7 +4143,7 @@ define <vscale x 16 x i32> @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<v
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
+    ptr %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
     iXLen %4, iXLen 1)
@@ -4153,11 +4153,11 @@ entry:
 
 declare <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i8(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   <vscale x 1 x i8>,
   iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64>* %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -4167,7 +4167,7 @@ define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i8(
     <vscale x 1 x i64> undef,
-    <vscale x 1 x i64>* %0,
+    ptr %0,
     <vscale x 1 x i8> %1,
     iXLen %2)
 
@@ -4176,13 +4176,13 @@ entry:
 
 declare <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -4191,7 +4191,7 @@ define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscal
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
+    ptr %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -4201,11 +4201,11 @@ entry:
 
 declare <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i8(
   <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
+  ptr,
   <vscale x 2 x i8>,
   iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64>* %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -4215,7 +4215,7 @@ define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i8(
     <vscale x 2 x i64> undef,
-    <vscale x 2 x i64>* %0,
+    ptr %0,
     <vscale x 2 x i8> %1,
     iXLen %2)
 
@@ -4224,13 +4224,13 @@ entry:
 
 declare <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8(
   <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
+  ptr,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -4239,7 +4239,7 @@ define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscal
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
+    ptr %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -4249,11 +4249,11 @@ entry:
 
 declare <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i8(
   <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
+  ptr,
   <vscale x 4 x i8>,
   iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64>* %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -4263,7 +4263,7 @@ define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i8(
     <vscale x 4 x i64> undef,
-    <vscale x 4 x i64>* %0,
+    ptr %0,
     <vscale x 4 x i8> %1,
     iXLen %2)
 
@@ -4272,13 +4272,13 @@ entry:
 
 declare <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8(
   <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
+  ptr,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -4287,7 +4287,7 @@ define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscal
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
+    ptr %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -4297,11 +4297,11 @@ entry:
 
 declare <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i8(
   <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
+  ptr,
   <vscale x 8 x i8>,
   iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64>* %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -4311,7 +4311,7 @@ define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i8(
     <vscale x 8 x i64> undef,
-    <vscale x 8 x i64>* %0,
+    ptr %0,
     <vscale x 8 x i8> %1,
     iXLen %2)
 
@@ -4320,13 +4320,13 @@ entry:
 
 declare <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8(
   <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
+  ptr,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -4335,7 +4335,7 @@ define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscal
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
+    ptr %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)
@@ -4345,11 +4345,11 @@ entry:
 
 declare <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i8(
   <vscale x 1 x half>,
-  <vscale x 1 x half>*,
+  ptr,
   <vscale x 1 x i8>,
   iXLen);
 
-define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half>* %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -4359,7 +4359,7 @@ define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i8(<vscale x
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i8(
     <vscale x 1 x half> undef,
-    <vscale x 1 x half>* %0,
+    ptr %0,
     <vscale x 1 x i8> %1,
     iXLen %2)
 
@@ -4368,13 +4368,13 @@ entry:
 
 declare <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i8(
   <vscale x 1 x half>,
-  <vscale x 1 x half>*,
+  ptr,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -4383,7 +4383,7 @@ define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vsca
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i8(
     <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
+    ptr %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -4393,11 +4393,11 @@ entry:
 
 declare <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i8(
   <vscale x 2 x half>,
-  <vscale x 2 x half>*,
+  ptr,
   <vscale x 2 x i8>,
   iXLen);
 
-define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half>* %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -4407,7 +4407,7 @@ define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i8(<vscale x
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i8(
     <vscale x 2 x half> undef,
-    <vscale x 2 x half>* %0,
+    ptr %0,
     <vscale x 2 x i8> %1,
     iXLen %2)
 
@@ -4416,13 +4416,13 @@ entry:
 
 declare <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i8(
   <vscale x 2 x half>,
-  <vscale x 2 x half>*,
+  ptr,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -4431,7 +4431,7 @@ define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vsca
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i8(
     <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
+    ptr %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -4441,11 +4441,11 @@ entry:
 
 declare <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i8(
   <vscale x 4 x half>,
-  <vscale x 4 x half>*,
+  ptr,
   <vscale x 4 x i8>,
   iXLen);
 
-define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half>* %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -4455,7 +4455,7 @@ define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i8(<vscale x
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i8(
     <vscale x 4 x half> undef,
-    <vscale x 4 x half>* %0,
+    ptr %0,
     <vscale x 4 x i8> %1,
     iXLen %2)
 
@@ -4464,13 +4464,13 @@ entry:
 
 declare <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i8(
   <vscale x 4 x half>,
-  <vscale x 4 x half>*,
+  ptr,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -4479,7 +4479,7 @@ define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vsca
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i8(
     <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
+    ptr %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -4489,11 +4489,11 @@ entry:
 
 declare <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i8(
   <vscale x 8 x half>,
-  <vscale x 8 x half>*,
+  ptr,
   <vscale x 8 x i8>,
   iXLen);
 
-define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half>* %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -4503,7 +4503,7 @@ define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i8(<vscale x
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i8(
     <vscale x 8 x half> undef,
-    <vscale x 8 x half>* %0,
+    ptr %0,
     <vscale x 8 x i8> %1,
     iXLen %2)
 
@@ -4512,13 +4512,13 @@ entry:
 
 declare <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i8(
   <vscale x 8 x half>,
-  <vscale x 8 x half>*,
+  ptr,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -4527,7 +4527,7 @@ define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vsca
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i8(
     <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
+    ptr %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)
@@ -4537,11 +4537,11 @@ entry:
 
 declare <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i8(
   <vscale x 16 x half>,
-  <vscale x 16 x half>*,
+  ptr,
   <vscale x 16 x i8>,
   iXLen);
 
-define <vscale x 16 x half> @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half>* %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -4551,7 +4551,7 @@ define <vscale x 16 x half> @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i8(<vscal
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i8(
     <vscale x 16 x half> undef,
-    <vscale x 16 x half>* %0,
+    ptr %0,
     <vscale x 16 x i8> %1,
     iXLen %2)
 
@@ -4560,13 +4560,13 @@ entry:
 
 declare <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i8(
   <vscale x 16 x half>,
-  <vscale x 16 x half>*,
+  ptr,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 16 x half> @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -4575,7 +4575,7 @@ define <vscale x 16 x half> @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i8(
     <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
+    ptr %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
     iXLen %4, iXLen 1)
@@ -4585,11 +4585,11 @@ entry:
 
 declare <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i8(
   <vscale x 32 x half>,
-  <vscale x 32 x half>*,
+  ptr,
   <vscale x 32 x i8>,
   iXLen);
 
-define <vscale x 32 x half> @intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half>* %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i8(ptr %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -4599,7 +4599,7 @@ define <vscale x 32 x half> @intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i8(<vscal
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i8(
     <vscale x 32 x half> undef,
-    <vscale x 32 x half>* %0,
+    ptr %0,
     <vscale x 32 x i8> %1,
     iXLen %2)
 
@@ -4608,13 +4608,13 @@ entry:
 
 declare <vscale x 32 x half> @llvm.riscv.vluxei.mask.nxv32f16.nxv32i8(
   <vscale x 32 x half>,
-  <vscale x 32 x half>*,
+  ptr,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 32 x half> @intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+define <vscale x 32 x half> @intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -4623,7 +4623,7 @@ define <vscale x 32 x half> @intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vluxei.mask.nxv32f16.nxv32i8(
     <vscale x 32 x half> %0,
-    <vscale x 32 x half>* %1,
+    ptr %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
     iXLen %4, iXLen 1)
@@ -4633,11 +4633,11 @@ entry:
 
 declare <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i8(
   <vscale x 1 x float>,
-  <vscale x 1 x float>*,
+  ptr,
   <vscale x 1 x i8>,
   iXLen);
 
-define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float>* %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -4647,7 +4647,7 @@ define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i8(<vscale x
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i8(
     <vscale x 1 x float> undef,
-    <vscale x 1 x float>* %0,
+    ptr %0,
     <vscale x 1 x i8> %1,
     iXLen %2)
 
@@ -4656,13 +4656,13 @@ entry:
 
 declare <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8(
   <vscale x 1 x float>,
-  <vscale x 1 x float>*,
+  ptr,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -4671,7 +4671,7 @@ define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vsc
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8(
     <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
+    ptr %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -4681,11 +4681,11 @@ entry:
 
 declare <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i8(
   <vscale x 2 x float>,
-  <vscale x 2 x float>*,
+  ptr,
   <vscale x 2 x i8>,
   iXLen);
 
-define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float>* %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -4695,7 +4695,7 @@ define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i8(<vscale x
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i8(
     <vscale x 2 x float> undef,
-    <vscale x 2 x float>* %0,
+    ptr %0,
     <vscale x 2 x i8> %1,
     iXLen %2)
 
@@ -4704,13 +4704,13 @@ entry:
 
 declare <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8(
   <vscale x 2 x float>,
-  <vscale x 2 x float>*,
+  ptr,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -4719,7 +4719,7 @@ define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vsc
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8(
     <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
+    ptr %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -4729,11 +4729,11 @@ entry:
 
 declare <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i8(
   <vscale x 4 x float>,
-  <vscale x 4 x float>*,
+  ptr,
   <vscale x 4 x i8>,
   iXLen);
 
-define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float>* %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -4743,7 +4743,7 @@ define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i8(<vscale x
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i8(
     <vscale x 4 x float> undef,
-    <vscale x 4 x float>* %0,
+    ptr %0,
     <vscale x 4 x i8> %1,
     iXLen %2)
 
@@ -4752,13 +4752,13 @@ entry:
 
 declare <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8(
   <vscale x 4 x float>,
-  <vscale x 4 x float>*,
+  ptr,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -4767,7 +4767,7 @@ define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vsc
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8(
     <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
+    ptr %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -4777,11 +4777,11 @@ entry:
 
 declare <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i8(
   <vscale x 8 x float>,
-  <vscale x 8 x float>*,
+  ptr,
   <vscale x 8 x i8>,
   iXLen);
 
-define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float>* %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -4791,7 +4791,7 @@ define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i8(<vscale x
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i8(
     <vscale x 8 x float> undef,
-    <vscale x 8 x float>* %0,
+    ptr %0,
     <vscale x 8 x i8> %1,
     iXLen %2)
 
@@ -4800,13 +4800,13 @@ entry:
 
 declare <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8(
   <vscale x 8 x float>,
-  <vscale x 8 x float>*,
+  ptr,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -4815,7 +4815,7 @@ define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vsc
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8(
     <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
+    ptr %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)
@@ -4825,11 +4825,11 @@ entry:
 
 declare <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i8(
   <vscale x 16 x float>,
-  <vscale x 16 x float>*,
+  ptr,
   <vscale x 16 x i8>,
   iXLen);
 
-define <vscale x 16 x float> @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float>* %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -4839,7 +4839,7 @@ define <vscale x 16 x float> @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i8(<vsca
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i8(
     <vscale x 16 x float> undef,
-    <vscale x 16 x float>* %0,
+    ptr %0,
     <vscale x 16 x i8> %1,
     iXLen %2)
 
@@ -4848,13 +4848,13 @@ entry:
 
 declare <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8(
   <vscale x 16 x float>,
-  <vscale x 16 x float>*,
+  ptr,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 16 x float> @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -4863,7 +4863,7 @@ define <vscale x 16 x float> @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i8(
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8(
     <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
+    ptr %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
     iXLen %4, iXLen 1)
@@ -4873,11 +4873,11 @@ entry:
 
 declare <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i8(
   <vscale x 1 x double>,
-  <vscale x 1 x double>*,
+  ptr,
   <vscale x 1 x i8>,
   iXLen);
 
-define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double>* %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -4887,7 +4887,7 @@ define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i8(<vscale
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i8(
     <vscale x 1 x double> undef,
-    <vscale x 1 x double>* %0,
+    ptr %0,
     <vscale x 1 x i8> %1,
     iXLen %2)
 
@@ -4896,13 +4896,13 @@ entry:
 
 declare <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8(
   <vscale x 1 x double>,
-  <vscale x 1 x double>*,
+  ptr,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -4911,7 +4911,7 @@ define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vs
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8(
     <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
+    ptr %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
     iXLen %4, iXLen 1)
@@ -4921,11 +4921,11 @@ entry:
 
 declare <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i8(
   <vscale x 2 x double>,
-  <vscale x 2 x double>*,
+  ptr,
   <vscale x 2 x i8>,
   iXLen);
 
-define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double>* %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -4935,7 +4935,7 @@ define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i8(<vscale
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i8(
     <vscale x 2 x double> undef,
-    <vscale x 2 x double>* %0,
+    ptr %0,
     <vscale x 2 x i8> %1,
     iXLen %2)
 
@@ -4944,13 +4944,13 @@ entry:
 
 declare <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8(
   <vscale x 2 x double>,
-  <vscale x 2 x double>*,
+  ptr,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -4959,7 +4959,7 @@ define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vs
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8(
     <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
+    ptr %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
     iXLen %4, iXLen 1)
@@ -4969,11 +4969,11 @@ entry:
 
 declare <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i8(
   <vscale x 4 x double>,
-  <vscale x 4 x double>*,
+  ptr,
   <vscale x 4 x i8>,
   iXLen);
 
-define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double>* %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -4983,7 +4983,7 @@ define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i8(<vscale
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i8(
     <vscale x 4 x double> undef,
-    <vscale x 4 x double>* %0,
+    ptr %0,
     <vscale x 4 x i8> %1,
     iXLen %2)
 
@@ -4992,13 +4992,13 @@ entry:
 
 declare <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8(
   <vscale x 4 x double>,
-  <vscale x 4 x double>*,
+  ptr,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -5007,7 +5007,7 @@ define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vs
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8(
     <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
+    ptr %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
     iXLen %4, iXLen 1)
@@ -5017,11 +5017,11 @@ entry:
 
 declare <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i8(
   <vscale x 8 x double>,
-  <vscale x 8 x double>*,
+  ptr,
   <vscale x 8 x i8>,
   iXLen);
 
-define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double>* %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -5031,7 +5031,7 @@ define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i8(<vscale
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i8(
     <vscale x 8 x double> undef,
-    <vscale x 8 x double>* %0,
+    ptr %0,
     <vscale x 8 x i8> %1,
     iXLen %2)
 
@@ -5040,13 +5040,13 @@ entry:
 
 declare <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8(
   <vscale x 8 x double>,
-  <vscale x 8 x double>*,
+  ptr,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -5055,7 +5055,7 @@ define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vs
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8(
     <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
+    ptr %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
     iXLen %4, iXLen 1)

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vpload.ll b/llvm/test/CodeGen/RISCV/rvv/vpload.ll
index acca2d2f40001..c203fcb903e56 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpload.ll
@@ -4,19 +4,19 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v \
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s
 
-declare <vscale x 1 x i8> @llvm.vp.load.nxv1i8.p0(<vscale x 1 x i8>*, <vscale x 1 x i1>, i32)
+declare <vscale x 1 x i8> @llvm.vp.load.nxv1i8.p0(ptr, <vscale x 1 x i1>, i32)
 
-define <vscale x 1 x i8> @vpload_nxv1i8(<vscale x 1 x i8>* %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+define <vscale x 1 x i8> @vpload_nxv1i8(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv1i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vle8.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 1 x i8> @llvm.vp.load.nxv1i8.p0(<vscale x 1 x i8>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
+  %load = call <vscale x 1 x i8> @llvm.vp.load.nxv1i8.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i8> %load
 }
 
-define <vscale x 1 x i8> @vpload_nxv1i8_allones_mask(<vscale x 1 x i8>* %ptr, i32 zeroext %evl) {
+define <vscale x 1 x i8> @vpload_nxv1i8_allones_mask(ptr %ptr, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv1i8_allones_mask:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -24,59 +24,59 @@ define <vscale x 1 x i8> @vpload_nxv1i8_allones_mask(<vscale x 1 x i8>* %ptr, i3
 ; CHECK-NEXT:    ret
   %a = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
   %b = shufflevector <vscale x 1 x i1> %a, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %load = call <vscale x 1 x i8> @llvm.vp.load.nxv1i8.p0(<vscale x 1 x i8>* %ptr, <vscale x 1 x i1> %b, i32 %evl)
+  %load = call <vscale x 1 x i8> @llvm.vp.load.nxv1i8.p0(ptr %ptr, <vscale x 1 x i1> %b, i32 %evl)
   ret <vscale x 1 x i8> %load
 }
 
-declare <vscale x 2 x i8> @llvm.vp.load.nxv2i8.p0(<vscale x 2 x i8>*, <vscale x 2 x i1>, i32)
+declare <vscale x 2 x i8> @llvm.vp.load.nxv2i8.p0(ptr, <vscale x 2 x i1>, i32)
 
-define <vscale x 2 x i8> @vpload_nxv2i8(<vscale x 2 x i8>* %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+define <vscale x 2 x i8> @vpload_nxv2i8(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vle8.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x i8> @llvm.vp.load.nxv2i8.p0(<vscale x 2 x i8>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
+  %load = call <vscale x 2 x i8> @llvm.vp.load.nxv2i8.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i8> %load
 }
 
-declare <vscale x 3 x i8> @llvm.vp.load.nxv3i8.p0(<vscale x 3 x i8>*, <vscale x 3 x i1>, i32)
+declare <vscale x 3 x i8> @llvm.vp.load.nxv3i8.p0(ptr, <vscale x 3 x i1>, i32)
 
-define <vscale x 3 x i8> @vpload_nxv3i8(<vscale x 3 x i8>* %ptr, <vscale x 3 x i1> %m, i32 zeroext %evl) {
+define <vscale x 3 x i8> @vpload_nxv3i8(ptr %ptr, <vscale x 3 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv3i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vle8.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 3 x i8> @llvm.vp.load.nxv3i8.p0(<vscale x 3 x i8>* %ptr, <vscale x 3 x i1> %m, i32 %evl)
+  %load = call <vscale x 3 x i8> @llvm.vp.load.nxv3i8.p0(ptr %ptr, <vscale x 3 x i1> %m, i32 %evl)
   ret <vscale x 3 x i8> %load
 }
 
-declare <vscale x 4 x i8> @llvm.vp.load.nxv4i8.p0(<vscale x 4 x i8>*, <vscale x 4 x i1>, i32)
+declare <vscale x 4 x i8> @llvm.vp.load.nxv4i8.p0(ptr, <vscale x 4 x i1>, i32)
 
-define <vscale x 4 x i8> @vpload_nxv4i8(<vscale x 4 x i8>* %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+define <vscale x 4 x i8> @vpload_nxv4i8(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv4i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vle8.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 4 x i8> @llvm.vp.load.nxv4i8.p0(<vscale x 4 x i8>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
+  %load = call <vscale x 4 x i8> @llvm.vp.load.nxv4i8.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i8> %load
 }
 
-declare <vscale x 8 x i8> @llvm.vp.load.nxv8i8.p0(<vscale x 8 x i8>*, <vscale x 8 x i1>, i32)
+declare <vscale x 8 x i8> @llvm.vp.load.nxv8i8.p0(ptr, <vscale x 8 x i1>, i32)
 
-define <vscale x 8 x i8> @vpload_nxv8i8(<vscale x 8 x i8>* %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+define <vscale x 8 x i8> @vpload_nxv8i8(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv8i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vle8.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 8 x i8> @llvm.vp.load.nxv8i8.p0(<vscale x 8 x i8>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
+  %load = call <vscale x 8 x i8> @llvm.vp.load.nxv8i8.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i8> %load
 }
 
-define <vscale x 8 x i8> @vpload_nxv8i8_allones_mask(<vscale x 8 x i8>* %ptr, i32 zeroext %evl) {
+define <vscale x 8 x i8> @vpload_nxv8i8_allones_mask(ptr %ptr, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv8i8_allones_mask:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -84,35 +84,35 @@ define <vscale x 8 x i8> @vpload_nxv8i8_allones_mask(<vscale x 8 x i8>* %ptr, i3
 ; CHECK-NEXT:    ret
   %a = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
   %b = shufflevector <vscale x 8 x i1> %a, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %load = call <vscale x 8 x i8> @llvm.vp.load.nxv8i8.p0(<vscale x 8 x i8>* %ptr, <vscale x 8 x i1> %b, i32 %evl)
+  %load = call <vscale x 8 x i8> @llvm.vp.load.nxv8i8.p0(ptr %ptr, <vscale x 8 x i1> %b, i32 %evl)
   ret <vscale x 8 x i8> %load
 }
 
-declare <vscale x 1 x i16> @llvm.vp.load.nxv1i16.p0(<vscale x 1 x i16>*, <vscale x 1 x i1>, i32)
+declare <vscale x 1 x i16> @llvm.vp.load.nxv1i16.p0(ptr, <vscale x 1 x i1>, i32)
 
-define <vscale x 1 x i16> @vpload_nxv1i16(<vscale x 1 x i16>* %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+define <vscale x 1 x i16> @vpload_nxv1i16(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv1i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 1 x i16> @llvm.vp.load.nxv1i16.p0(<vscale x 1 x i16>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
+  %load = call <vscale x 1 x i16> @llvm.vp.load.nxv1i16.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i16> %load
 }
 
-declare <vscale x 2 x i16> @llvm.vp.load.nxv2i16.p0(<vscale x 2 x i16>*, <vscale x 2 x i1>, i32)
+declare <vscale x 2 x i16> @llvm.vp.load.nxv2i16.p0(ptr, <vscale x 2 x i1>, i32)
 
-define <vscale x 2 x i16> @vpload_nxv2i16(<vscale x 2 x i16>* %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+define <vscale x 2 x i16> @vpload_nxv2i16(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv2i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x i16> @llvm.vp.load.nxv2i16.p0(<vscale x 2 x i16>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
+  %load = call <vscale x 2 x i16> @llvm.vp.load.nxv2i16.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i16> %load
 }
 
-define <vscale x 2 x i16> @vpload_nxv2i16_allones_mask(<vscale x 2 x i16>* %ptr, i32 zeroext %evl) {
+define <vscale x 2 x i16> @vpload_nxv2i16_allones_mask(ptr %ptr, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv2i16_allones_mask:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -120,71 +120,71 @@ define <vscale x 2 x i16> @vpload_nxv2i16_allones_mask(<vscale x 2 x i16>* %ptr,
 ; CHECK-NEXT:    ret
   %a = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
   %b = shufflevector <vscale x 2 x i1> %a, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %load = call <vscale x 2 x i16> @llvm.vp.load.nxv2i16.p0(<vscale x 2 x i16>* %ptr, <vscale x 2 x i1> %b, i32 %evl)
+  %load = call <vscale x 2 x i16> @llvm.vp.load.nxv2i16.p0(ptr %ptr, <vscale x 2 x i1> %b, i32 %evl)
   ret <vscale x 2 x i16> %load
 }
 
-declare <vscale x 4 x i16> @llvm.vp.load.nxv4i16.p0(<vscale x 4 x i16>*, <vscale x 4 x i1>, i32)
+declare <vscale x 4 x i16> @llvm.vp.load.nxv4i16.p0(ptr, <vscale x 4 x i1>, i32)
 
-define <vscale x 4 x i16> @vpload_nxv4i16(<vscale x 4 x i16>* %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+define <vscale x 4 x i16> @vpload_nxv4i16(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv4i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 4 x i16> @llvm.vp.load.nxv4i16.p0(<vscale x 4 x i16>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
+  %load = call <vscale x 4 x i16> @llvm.vp.load.nxv4i16.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i16> %load
 }
 
-declare <vscale x 8 x i16> @llvm.vp.load.nxv8i16.p0(<vscale x 8 x i16>*, <vscale x 8 x i1>, i32)
+declare <vscale x 8 x i16> @llvm.vp.load.nxv8i16.p0(ptr, <vscale x 8 x i1>, i32)
 
-define <vscale x 8 x i16> @vpload_nxv8i16(<vscale x 8 x i16>* %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+define <vscale x 8 x i16> @vpload_nxv8i16(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 8 x i16> @llvm.vp.load.nxv8i16.p0(<vscale x 8 x i16>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
+  %load = call <vscale x 8 x i16> @llvm.vp.load.nxv8i16.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i16> %load
 }
 
-declare <vscale x 1 x i32> @llvm.vp.load.nxv1i32.p0(<vscale x 1 x i32>*, <vscale x 1 x i1>, i32)
+declare <vscale x 1 x i32> @llvm.vp.load.nxv1i32.p0(ptr, <vscale x 1 x i1>, i32)
 
-define <vscale x 1 x i32> @vpload_nxv1i32(<vscale x 1 x i32>* %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+define <vscale x 1 x i32> @vpload_nxv1i32(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv1i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 1 x i32> @llvm.vp.load.nxv1i32.p0(<vscale x 1 x i32>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
+  %load = call <vscale x 1 x i32> @llvm.vp.load.nxv1i32.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i32> %load
 }
 
-declare <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(<vscale x 2 x i32>*, <vscale x 2 x i1>, i32)
+declare <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr, <vscale x 2 x i1>, i32)
 
-define <vscale x 2 x i32> @vpload_nxv2i32(<vscale x 2 x i32>* %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+define <vscale x 2 x i32> @vpload_nxv2i32(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv2i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(<vscale x 2 x i32>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
+  %load = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i32> %load
 }
 
-declare <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(<vscale x 4 x i32>*, <vscale x 4 x i1>, i32)
+declare <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr, <vscale x 4 x i1>, i32)
 
-define <vscale x 4 x i32> @vpload_nxv4i32(<vscale x 4 x i32>* %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+define <vscale x 4 x i32> @vpload_nxv4i32(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(<vscale x 4 x i32>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
+  %load = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i32> %load
 }
 
-define <vscale x 4 x i32> @vpload_nxv4i32_allones_mask(<vscale x 4 x i32>* %ptr, i32 zeroext %evl) {
+define <vscale x 4 x i32> @vpload_nxv4i32_allones_mask(ptr %ptr, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv4i32_allones_mask:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -192,35 +192,35 @@ define <vscale x 4 x i32> @vpload_nxv4i32_allones_mask(<vscale x 4 x i32>* %ptr,
 ; CHECK-NEXT:    ret
   %a = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
   %b = shufflevector <vscale x 4 x i1> %a, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %load = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(<vscale x 4 x i32>* %ptr, <vscale x 4 x i1> %b, i32 %evl)
+  %load = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr %ptr, <vscale x 4 x i1> %b, i32 %evl)
   ret <vscale x 4 x i32> %load
 }
 
-declare <vscale x 8 x i32> @llvm.vp.load.nxv8i32.p0(<vscale x 8 x i32>*, <vscale x 8 x i1>, i32)
+declare <vscale x 8 x i32> @llvm.vp.load.nxv8i32.p0(ptr, <vscale x 8 x i1>, i32)
 
-define <vscale x 8 x i32> @vpload_nxv8i32(<vscale x 8 x i32>* %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+define <vscale x 8 x i32> @vpload_nxv8i32(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv8i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 8 x i32> @llvm.vp.load.nxv8i32.p0(<vscale x 8 x i32>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
+  %load = call <vscale x 8 x i32> @llvm.vp.load.nxv8i32.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i32> %load
 }
 
-declare <vscale x 1 x i64> @llvm.vp.load.nxv1i64.p0(<vscale x 1 x i64>*, <vscale x 1 x i1>, i32)
+declare <vscale x 1 x i64> @llvm.vp.load.nxv1i64.p0(ptr, <vscale x 1 x i1>, i32)
 
-define <vscale x 1 x i64> @vpload_nxv1i64(<vscale x 1 x i64>* %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+define <vscale x 1 x i64> @vpload_nxv1i64(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv1i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 1 x i64> @llvm.vp.load.nxv1i64.p0(<vscale x 1 x i64>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
+  %load = call <vscale x 1 x i64> @llvm.vp.load.nxv1i64.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i64> %load
 }
 
-define <vscale x 1 x i64> @vpload_nxv1i64_allones_mask(<vscale x 1 x i64>* %ptr, i32 zeroext %evl) {
+define <vscale x 1 x i64> @vpload_nxv1i64_allones_mask(ptr %ptr, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv1i64_allones_mask:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -228,71 +228,71 @@ define <vscale x 1 x i64> @vpload_nxv1i64_allones_mask(<vscale x 1 x i64>* %ptr,
 ; CHECK-NEXT:    ret
   %a = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
   %b = shufflevector <vscale x 1 x i1> %a, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %load = call <vscale x 1 x i64> @llvm.vp.load.nxv1i64.p0(<vscale x 1 x i64>* %ptr, <vscale x 1 x i1> %b, i32 %evl)
+  %load = call <vscale x 1 x i64> @llvm.vp.load.nxv1i64.p0(ptr %ptr, <vscale x 1 x i1> %b, i32 %evl)
   ret <vscale x 1 x i64> %load
 }
 
-declare <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(<vscale x 2 x i64>*, <vscale x 2 x i1>, i32)
+declare <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr, <vscale x 2 x i1>, i32)
 
-define <vscale x 2 x i64> @vpload_nxv2i64(<vscale x 2 x i64>* %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+define <vscale x 2 x i64> @vpload_nxv2i64(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv2i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(<vscale x 2 x i64>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
+  %load = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i64> %load
 }
 
-declare <vscale x 4 x i64> @llvm.vp.load.nxv4i64.p0(<vscale x 4 x i64>*, <vscale x 4 x i1>, i32)
+declare <vscale x 4 x i64> @llvm.vp.load.nxv4i64.p0(ptr, <vscale x 4 x i1>, i32)
 
-define <vscale x 4 x i64> @vpload_nxv4i64(<vscale x 4 x i64>* %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+define <vscale x 4 x i64> @vpload_nxv4i64(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv4i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 4 x i64> @llvm.vp.load.nxv4i64.p0(<vscale x 4 x i64>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
+  %load = call <vscale x 4 x i64> @llvm.vp.load.nxv4i64.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i64> %load
 }
 
-declare <vscale x 8 x i64> @llvm.vp.load.nxv8i64.p0(<vscale x 8 x i64>*, <vscale x 8 x i1>, i32)
+declare <vscale x 8 x i64> @llvm.vp.load.nxv8i64.p0(ptr, <vscale x 8 x i1>, i32)
 
-define <vscale x 8 x i64> @vpload_nxv8i64(<vscale x 8 x i64>* %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+define <vscale x 8 x i64> @vpload_nxv8i64(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv8i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 8 x i64> @llvm.vp.load.nxv8i64.p0(<vscale x 8 x i64>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
+  %load = call <vscale x 8 x i64> @llvm.vp.load.nxv8i64.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i64> %load
 }
 
-declare <vscale x 1 x half> @llvm.vp.load.nxv1f16.p0(<vscale x 1 x half>*, <vscale x 1 x i1>, i32)
+declare <vscale x 1 x half> @llvm.vp.load.nxv1f16.p0(ptr, <vscale x 1 x i1>, i32)
 
-define <vscale x 1 x half> @vpload_nxv1f16(<vscale x 1 x half>* %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+define <vscale x 1 x half> @vpload_nxv1f16(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 1 x half> @llvm.vp.load.nxv1f16.p0(<vscale x 1 x half>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
+  %load = call <vscale x 1 x half> @llvm.vp.load.nxv1f16.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x half> %load
 }
 
-declare <vscale x 2 x half> @llvm.vp.load.nxv2f16.p0(<vscale x 2 x half>*, <vscale x 2 x i1>, i32)
+declare <vscale x 2 x half> @llvm.vp.load.nxv2f16.p0(ptr, <vscale x 2 x i1>, i32)
 
-define <vscale x 2 x half> @vpload_nxv2f16(<vscale x 2 x half>* %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+define <vscale x 2 x half> @vpload_nxv2f16(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x half> @llvm.vp.load.nxv2f16.p0(<vscale x 2 x half>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
+  %load = call <vscale x 2 x half> @llvm.vp.load.nxv2f16.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x half> %load
 }
 
-define <vscale x 2 x half> @vpload_nxv2f16_allones_mask(<vscale x 2 x half>* %ptr, i32 zeroext %evl) {
+define <vscale x 2 x half> @vpload_nxv2f16_allones_mask(ptr %ptr, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv2f16_allones_mask:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -300,83 +300,83 @@ define <vscale x 2 x half> @vpload_nxv2f16_allones_mask(<vscale x 2 x half>* %pt
 ; CHECK-NEXT:    ret
   %a = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
   %b = shufflevector <vscale x 2 x i1> %a, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %load = call <vscale x 2 x half> @llvm.vp.load.nxv2f16.p0(<vscale x 2 x half>* %ptr, <vscale x 2 x i1> %b, i32 %evl)
+  %load = call <vscale x 2 x half> @llvm.vp.load.nxv2f16.p0(ptr %ptr, <vscale x 2 x i1> %b, i32 %evl)
   ret <vscale x 2 x half> %load
 }
 
-declare <vscale x 4 x half> @llvm.vp.load.nxv4f16.p0(<vscale x 4 x half>*, <vscale x 4 x i1>, i32)
+declare <vscale x 4 x half> @llvm.vp.load.nxv4f16.p0(ptr, <vscale x 4 x i1>, i32)
 
-define <vscale x 4 x half> @vpload_nxv4f16(<vscale x 4 x half>* %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+define <vscale x 4 x half> @vpload_nxv4f16(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 4 x half> @llvm.vp.load.nxv4f16.p0(<vscale x 4 x half>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
+  %load = call <vscale x 4 x half> @llvm.vp.load.nxv4f16.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x half> %load
 }
 
-declare <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(<vscale x 8 x half>*, <vscale x 8 x i1>, i32)
+declare <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr, <vscale x 8 x i1>, i32)
 
-define <vscale x 8 x half> @vpload_nxv8f16(<vscale x 8 x half>* %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+define <vscale x 8 x half> @vpload_nxv8f16(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(<vscale x 8 x half>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
+  %load = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x half> %load
 }
 
-declare <vscale x 1 x float> @llvm.vp.load.nxv1f32.p0(<vscale x 1 x float>*, <vscale x 1 x i1>, i32)
+declare <vscale x 1 x float> @llvm.vp.load.nxv1f32.p0(ptr, <vscale x 1 x i1>, i32)
 
-define <vscale x 1 x float> @vpload_nxv1f32(<vscale x 1 x float>* %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+define <vscale x 1 x float> @vpload_nxv1f32(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 1 x float> @llvm.vp.load.nxv1f32.p0(<vscale x 1 x float>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
+  %load = call <vscale x 1 x float> @llvm.vp.load.nxv1f32.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x float> %load
 }
 
-declare <vscale x 2 x float> @llvm.vp.load.nxv2f32.p0(<vscale x 2 x float>*, <vscale x 2 x i1>, i32)
+declare <vscale x 2 x float> @llvm.vp.load.nxv2f32.p0(ptr, <vscale x 2 x i1>, i32)
 
-define <vscale x 2 x float> @vpload_nxv2f32(<vscale x 2 x float>* %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+define <vscale x 2 x float> @vpload_nxv2f32(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x float> @llvm.vp.load.nxv2f32.p0(<vscale x 2 x float>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
+  %load = call <vscale x 2 x float> @llvm.vp.load.nxv2f32.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x float> %load
 }
 
-declare <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(<vscale x 4 x float>*, <vscale x 4 x i1>, i32)
+declare <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr, <vscale x 4 x i1>, i32)
 
-define <vscale x 4 x float> @vpload_nxv4f32(<vscale x 4 x float>* %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+define <vscale x 4 x float> @vpload_nxv4f32(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(<vscale x 4 x float>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
+  %load = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x float> %load
 }
 
-declare <vscale x 8 x float> @llvm.vp.load.nxv8f32.p0(<vscale x 8 x float>*, <vscale x 8 x i1>, i32)
+declare <vscale x 8 x float> @llvm.vp.load.nxv8f32.p0(ptr, <vscale x 8 x i1>, i32)
 
-define <vscale x 8 x float> @vpload_nxv8f32(<vscale x 8 x float>* %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+define <vscale x 8 x float> @vpload_nxv8f32(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 8 x float> @llvm.vp.load.nxv8f32.p0(<vscale x 8 x float>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
+  %load = call <vscale x 8 x float> @llvm.vp.load.nxv8f32.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x float> %load
 }
 
-define <vscale x 8 x float> @vpload_nxv8f32_allones_mask(<vscale x 8 x float>* %ptr, i32 zeroext %evl) {
+define <vscale x 8 x float> @vpload_nxv8f32_allones_mask(ptr %ptr, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv8f32_allones_mask:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -384,47 +384,47 @@ define <vscale x 8 x float> @vpload_nxv8f32_allones_mask(<vscale x 8 x float>* %
 ; CHECK-NEXT:    ret
   %a = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
   %b = shufflevector <vscale x 8 x i1> %a, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %load = call <vscale x 8 x float> @llvm.vp.load.nxv8f32.p0(<vscale x 8 x float>* %ptr, <vscale x 8 x i1> %b, i32 %evl)
+  %load = call <vscale x 8 x float> @llvm.vp.load.nxv8f32.p0(ptr %ptr, <vscale x 8 x i1> %b, i32 %evl)
   ret <vscale x 8 x float> %load
 }
 
-declare <vscale x 1 x double> @llvm.vp.load.nxv1f64.p0(<vscale x 1 x double>*, <vscale x 1 x i1>, i32)
+declare <vscale x 1 x double> @llvm.vp.load.nxv1f64.p0(ptr, <vscale x 1 x i1>, i32)
 
-define <vscale x 1 x double> @vpload_nxv1f64(<vscale x 1 x double>* %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+define <vscale x 1 x double> @vpload_nxv1f64(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 1 x double> @llvm.vp.load.nxv1f64.p0(<vscale x 1 x double>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
+  %load = call <vscale x 1 x double> @llvm.vp.load.nxv1f64.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x double> %load
 }
 
-declare <vscale x 2 x double> @llvm.vp.load.nxv2f64.p0(<vscale x 2 x double>*, <vscale x 2 x i1>, i32)
+declare <vscale x 2 x double> @llvm.vp.load.nxv2f64.p0(ptr, <vscale x 2 x i1>, i32)
 
-define <vscale x 2 x double> @vpload_nxv2f64(<vscale x 2 x double>* %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+define <vscale x 2 x double> @vpload_nxv2f64(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x double> @llvm.vp.load.nxv2f64.p0(<vscale x 2 x double>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
+  %load = call <vscale x 2 x double> @llvm.vp.load.nxv2f64.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x double> %load
 }
 
-declare <vscale x 4 x double> @llvm.vp.load.nxv4f64.p0(<vscale x 4 x double>*, <vscale x 4 x i1>, i32)
+declare <vscale x 4 x double> @llvm.vp.load.nxv4f64.p0(ptr, <vscale x 4 x i1>, i32)
 
-define <vscale x 4 x double> @vpload_nxv4f64(<vscale x 4 x double>* %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+define <vscale x 4 x double> @vpload_nxv4f64(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 4 x double> @llvm.vp.load.nxv4f64.p0(<vscale x 4 x double>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
+  %load = call <vscale x 4 x double> @llvm.vp.load.nxv4f64.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x double> %load
 }
 
-define <vscale x 4 x double> @vpload_nxv4f64_allones_mask(<vscale x 4 x double>* %ptr, i32 zeroext %evl) {
+define <vscale x 4 x double> @vpload_nxv4f64_allones_mask(ptr %ptr, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv4f64_allones_mask:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -432,25 +432,25 @@ define <vscale x 4 x double> @vpload_nxv4f64_allones_mask(<vscale x 4 x double>*
 ; CHECK-NEXT:    ret
   %a = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
   %b = shufflevector <vscale x 4 x i1> %a, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %load = call <vscale x 4 x double> @llvm.vp.load.nxv4f64.p0(<vscale x 4 x double>* %ptr, <vscale x 4 x i1> %b, i32 %evl)
+  %load = call <vscale x 4 x double> @llvm.vp.load.nxv4f64.p0(ptr %ptr, <vscale x 4 x i1> %b, i32 %evl)
   ret <vscale x 4 x double> %load
 }
 
-declare <vscale x 8 x double> @llvm.vp.load.nxv8f64.p0(<vscale x 8 x double>*, <vscale x 8 x i1>, i32)
+declare <vscale x 8 x double> @llvm.vp.load.nxv8f64.p0(ptr, <vscale x 8 x i1>, i32)
 
-define <vscale x 8 x double> @vpload_nxv8f64(<vscale x 8 x double>* %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+define <vscale x 8 x double> @vpload_nxv8f64(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 8 x double> @llvm.vp.load.nxv8f64.p0(<vscale x 8 x double>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
+  %load = call <vscale x 8 x double> @llvm.vp.load.nxv8f64.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x double> %load
 }
 
-declare <vscale x 16 x double> @llvm.vp.load.nxv16f64.p0(<vscale x 16 x double>*, <vscale x 16 x i1>, i32)
+declare <vscale x 16 x double> @llvm.vp.load.nxv16f64.p0(ptr, <vscale x 16 x i1>, i32)
 
-define <vscale x 16 x double> @vpload_nxv16f64(<vscale x 16 x double>* %ptr, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+define <vscale x 16 x double> @vpload_nxv16f64(ptr %ptr, <vscale x 16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv16f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v8, v0
@@ -474,11 +474,11 @@ define <vscale x 16 x double> @vpload_nxv16f64(<vscale x 16 x double>* %ptr, <vs
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 16 x double> @llvm.vp.load.nxv16f64.p0(<vscale x 16 x double>* %ptr, <vscale x 16 x i1> %m, i32 %evl)
+  %load = call <vscale x 16 x double> @llvm.vp.load.nxv16f64.p0(ptr %ptr, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x double> %load
 }
 
-declare <vscale x 17 x double> @llvm.vp.load.nxv17f64.p0(<vscale x 17 x double>*, <vscale x 17 x i1>, i32)
+declare <vscale x 17 x double> @llvm.vp.load.nxv17f64.p0(ptr, <vscale x 17 x i1>, i32)
 
 declare <vscale x 1 x double> @llvm.vector.extract.nxv1f64(<vscale x 17 x double> %vec, i64 %idx)
 declare <vscale x 16 x double> @llvm.vector.extract.nxv16f64(<vscale x 17 x double> %vec, i64 %idx)
@@ -489,7 +489,7 @@ declare <vscale x 16 x double> @llvm.vector.extract.nxv16f64(<vscale x 17 x doub
 
 ; Widen to nxv32f64 then split into 4 x nxv8f64, of which 1 is empty.
 
-define <vscale x 16 x double> @vpload_nxv17f64(<vscale x 17 x double>* %ptr, <vscale x 1 x double>* %out, <vscale x 17 x i1> %m, i32 zeroext %evl) {
+define <vscale x 16 x double> @vpload_nxv17f64(ptr %ptr, ptr %out, <vscale x 17 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv17f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a3, vlenb
@@ -535,9 +535,9 @@ define <vscale x 16 x double> @vpload_nxv17f64(<vscale x 17 x double>* %ptr, <vs
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
 ; CHECK-NEXT:    vs1r.v v24, (a1)
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 17 x double> @llvm.vp.load.nxv17f64.p0(<vscale x 17 x double>* %ptr, <vscale x 17 x i1> %m, i32 %evl)
+  %load = call <vscale x 17 x double> @llvm.vp.load.nxv17f64.p0(ptr %ptr, <vscale x 17 x i1> %m, i32 %evl)
   %lo = call <vscale x 16 x double> @llvm.vector.extract.nxv16f64(<vscale x 17 x double> %load, i64 0)
   %hi = call <vscale x 1 x double> @llvm.vector.extract.nxv1f64(<vscale x 17 x double> %load, i64 16)
-  store <vscale x 1 x double> %hi, <vscale x 1 x double>* %out
+  store <vscale x 1 x double> %hi, ptr %out
   ret <vscale x 16 x double> %lo
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/vpstore.ll
index 082148e447548..8b27a61e243db 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpstore.ll
@@ -4,355 +4,355 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v \
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s
 
-declare void @llvm.vp.store.nxv1i8.p0(<vscale x 1 x i8>, <vscale x 1 x i8>*, <vscale x 1 x i1>, i32)
+declare void @llvm.vp.store.nxv1i8.p0(<vscale x 1 x i8>, ptr, <vscale x 1 x i1>, i32)
 
-define void @vpstore_nxv1i8(<vscale x 1 x i8> %val, <vscale x 1 x i8>* %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+define void @vpstore_nxv1i8(<vscale x 1 x i8> %val, ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv1i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vse8.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv1i8.p0(<vscale x 1 x i8> %val, <vscale x 1 x i8>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv1i8.p0(<vscale x 1 x i8> %val, ptr %ptr, <vscale x 1 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv2i8.p0(<vscale x 2 x i8>, <vscale x 2 x i8>*, <vscale x 2 x i1>, i32)
+declare void @llvm.vp.store.nxv2i8.p0(<vscale x 2 x i8>, ptr, <vscale x 2 x i1>, i32)
 
-define void @vpstore_nxv2i8(<vscale x 2 x i8> %val, <vscale x 2 x i8>* %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+define void @vpstore_nxv2i8(<vscale x 2 x i8> %val, ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vse8.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv2i8.p0(<vscale x 2 x i8> %val, <vscale x 2 x i8>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv2i8.p0(<vscale x 2 x i8> %val, ptr %ptr, <vscale x 2 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv3i8.p0(<vscale x 3 x i8>, <vscale x 3 x i8>*, <vscale x 3 x i1>, i32)
+declare void @llvm.vp.store.nxv3i8.p0(<vscale x 3 x i8>, ptr, <vscale x 3 x i1>, i32)
 
-define void @vpstore_nxv3i8(<vscale x 3 x i8> %val, <vscale x 3 x i8>* %ptr, <vscale x 3 x i1> %m, i32 zeroext %evl) {
+define void @vpstore_nxv3i8(<vscale x 3 x i8> %val, ptr %ptr, <vscale x 3 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv3i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vse8.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv3i8.p0(<vscale x 3 x i8> %val, <vscale x 3 x i8>* %ptr, <vscale x 3 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv3i8.p0(<vscale x 3 x i8> %val, ptr %ptr, <vscale x 3 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv4i8.p0(<vscale x 4 x i8>, <vscale x 4 x i8>*, <vscale x 4 x i1>, i32)
+declare void @llvm.vp.store.nxv4i8.p0(<vscale x 4 x i8>, ptr, <vscale x 4 x i1>, i32)
 
-define void @vpstore_nxv4i8(<vscale x 4 x i8> %val, <vscale x 4 x i8>* %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+define void @vpstore_nxv4i8(<vscale x 4 x i8> %val, ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv4i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vse8.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv4i8.p0(<vscale x 4 x i8> %val, <vscale x 4 x i8>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv4i8.p0(<vscale x 4 x i8> %val, ptr %ptr, <vscale x 4 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv8i8.p0(<vscale x 8 x i8>, <vscale x 8 x i8>*, <vscale x 8 x i1>, i32)
+declare void @llvm.vp.store.nxv8i8.p0(<vscale x 8 x i8>, ptr, <vscale x 8 x i1>, i32)
 
-define void @vpstore_nxv8i8(<vscale x 8 x i8> %val, <vscale x 8 x i8>* %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+define void @vpstore_nxv8i8(<vscale x 8 x i8> %val, ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv8i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vse8.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv8i8.p0(<vscale x 8 x i8> %val, <vscale x 8 x i8>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv8i8.p0(<vscale x 8 x i8> %val, ptr %ptr, <vscale x 8 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv1i16.p0(<vscale x 1 x i16>, <vscale x 1 x i16>*, <vscale x 1 x i1>, i32)
+declare void @llvm.vp.store.nxv1i16.p0(<vscale x 1 x i16>, ptr, <vscale x 1 x i1>, i32)
 
-define void @vpstore_nxv1i16(<vscale x 1 x i16> %val, <vscale x 1 x i16>* %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+define void @vpstore_nxv1i16(<vscale x 1 x i16> %val, ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv1i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv1i16.p0(<vscale x 1 x i16> %val, <vscale x 1 x i16>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv1i16.p0(<vscale x 1 x i16> %val, ptr %ptr, <vscale x 1 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv2i16.p0(<vscale x 2 x i16>, <vscale x 2 x i16>*, <vscale x 2 x i1>, i32)
+declare void @llvm.vp.store.nxv2i16.p0(<vscale x 2 x i16>, ptr, <vscale x 2 x i1>, i32)
 
-define void @vpstore_nxv2i16(<vscale x 2 x i16> %val, <vscale x 2 x i16>* %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+define void @vpstore_nxv2i16(<vscale x 2 x i16> %val, ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv2i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv2i16.p0(<vscale x 2 x i16> %val, <vscale x 2 x i16>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv2i16.p0(<vscale x 2 x i16> %val, ptr %ptr, <vscale x 2 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv4i16.p0(<vscale x 4 x i16>, <vscale x 4 x i16>*, <vscale x 4 x i1>, i32)
+declare void @llvm.vp.store.nxv4i16.p0(<vscale x 4 x i16>, ptr, <vscale x 4 x i1>, i32)
 
-define void @vpstore_nxv4i16(<vscale x 4 x i16> %val, <vscale x 4 x i16>* %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+define void @vpstore_nxv4i16(<vscale x 4 x i16> %val, ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv4i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv4i16.p0(<vscale x 4 x i16> %val, <vscale x 4 x i16>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv4i16.p0(<vscale x 4 x i16> %val, ptr %ptr, <vscale x 4 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv8i16.p0(<vscale x 8 x i16>, <vscale x 8 x i16>*, <vscale x 8 x i1>, i32)
+declare void @llvm.vp.store.nxv8i16.p0(<vscale x 8 x i16>, ptr, <vscale x 8 x i1>, i32)
 
-define void @vpstore_nxv8i16(<vscale x 8 x i16> %val, <vscale x 8 x i16>* %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+define void @vpstore_nxv8i16(<vscale x 8 x i16> %val, ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv8i16.p0(<vscale x 8 x i16> %val, <vscale x 8 x i16>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv8i16.p0(<vscale x 8 x i16> %val, ptr %ptr, <vscale x 8 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv1i32.p0(<vscale x 1 x i32>, <vscale x 1 x i32>*, <vscale x 1 x i1>, i32)
+declare void @llvm.vp.store.nxv1i32.p0(<vscale x 1 x i32>, ptr, <vscale x 1 x i1>, i32)
 
-define void @vpstore_nxv1i32(<vscale x 1 x i32> %val, <vscale x 1 x i32>* %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+define void @vpstore_nxv1i32(<vscale x 1 x i32> %val, ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv1i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv1i32.p0(<vscale x 1 x i32> %val, <vscale x 1 x i32>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv1i32.p0(<vscale x 1 x i32> %val, ptr %ptr, <vscale x 1 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv2i32.p0(<vscale x 2 x i32>, <vscale x 2 x i32>*, <vscale x 2 x i1>, i32)
+declare void @llvm.vp.store.nxv2i32.p0(<vscale x 2 x i32>, ptr, <vscale x 2 x i1>, i32)
 
-define void @vpstore_nxv2i32(<vscale x 2 x i32> %val, <vscale x 2 x i32>* %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+define void @vpstore_nxv2i32(<vscale x 2 x i32> %val, ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv2i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv2i32.p0(<vscale x 2 x i32> %val, <vscale x 2 x i32>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv2i32.p0(<vscale x 2 x i32> %val, ptr %ptr, <vscale x 2 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32>, <vscale x 4 x i32>*, <vscale x 4 x i1>, i32)
+declare void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32>, ptr, <vscale x 4 x i1>, i32)
 
-define void @vpstore_nxv4i32(<vscale x 4 x i32> %val, <vscale x 4 x i32>* %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+define void @vpstore_nxv4i32(<vscale x 4 x i32> %val, ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> %val, <vscale x 4 x i32>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> %val, ptr %ptr, <vscale x 4 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv8i32.p0(<vscale x 8 x i32>, <vscale x 8 x i32>*, <vscale x 8 x i1>, i32)
+declare void @llvm.vp.store.nxv8i32.p0(<vscale x 8 x i32>, ptr, <vscale x 8 x i1>, i32)
 
-define void @vpstore_nxv8i32(<vscale x 8 x i32> %val, <vscale x 8 x i32>* %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+define void @vpstore_nxv8i32(<vscale x 8 x i32> %val, ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv8i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv8i32.p0(<vscale x 8 x i32> %val, <vscale x 8 x i32>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv8i32.p0(<vscale x 8 x i32> %val, ptr %ptr, <vscale x 8 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv1i64.p0(<vscale x 1 x i64>, <vscale x 1 x i64>*, <vscale x 1 x i1>, i32)
+declare void @llvm.vp.store.nxv1i64.p0(<vscale x 1 x i64>, ptr, <vscale x 1 x i1>, i32)
 
-define void @vpstore_nxv1i64(<vscale x 1 x i64> %val, <vscale x 1 x i64>* %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+define void @vpstore_nxv1i64(<vscale x 1 x i64> %val, ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv1i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv1i64.p0(<vscale x 1 x i64> %val, <vscale x 1 x i64>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv1i64.p0(<vscale x 1 x i64> %val, ptr %ptr, <vscale x 1 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64>, <vscale x 2 x i64>*, <vscale x 2 x i1>, i32)
+declare void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64>, ptr, <vscale x 2 x i1>, i32)
 
-define void @vpstore_nxv2i64(<vscale x 2 x i64> %val, <vscale x 2 x i64>* %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+define void @vpstore_nxv2i64(<vscale x 2 x i64> %val, ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv2i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> %val, <vscale x 2 x i64>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> %val, ptr %ptr, <vscale x 2 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv4i64.p0(<vscale x 4 x i64>, <vscale x 4 x i64>*, <vscale x 4 x i1>, i32)
+declare void @llvm.vp.store.nxv4i64.p0(<vscale x 4 x i64>, ptr, <vscale x 4 x i1>, i32)
 
-define void @vpstore_nxv4i64(<vscale x 4 x i64> %val, <vscale x 4 x i64>* %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+define void @vpstore_nxv4i64(<vscale x 4 x i64> %val, ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv4i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv4i64.p0(<vscale x 4 x i64> %val, <vscale x 4 x i64>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv4i64.p0(<vscale x 4 x i64> %val, ptr %ptr, <vscale x 4 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv8i64.p0(<vscale x 8 x i64>, <vscale x 8 x i64>*, <vscale x 8 x i1>, i32)
+declare void @llvm.vp.store.nxv8i64.p0(<vscale x 8 x i64>, ptr, <vscale x 8 x i1>, i32)
 
-define void @vpstore_nxv8i64(<vscale x 8 x i64> %val, <vscale x 8 x i64>* %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+define void @vpstore_nxv8i64(<vscale x 8 x i64> %val, ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv8i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv8i64.p0(<vscale x 8 x i64> %val, <vscale x 8 x i64>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv8i64.p0(<vscale x 8 x i64> %val, ptr %ptr, <vscale x 8 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv1f16.p0(<vscale x 1 x half>, <vscale x 1 x half>*, <vscale x 1 x i1>, i32)
+declare void @llvm.vp.store.nxv1f16.p0(<vscale x 1 x half>, ptr, <vscale x 1 x i1>, i32)
 
-define void @vpstore_nxv1f16(<vscale x 1 x half> %val, <vscale x 1 x half>* %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+define void @vpstore_nxv1f16(<vscale x 1 x half> %val, ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv1f16.p0(<vscale x 1 x half> %val, <vscale x 1 x half>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv1f16.p0(<vscale x 1 x half> %val, ptr %ptr, <vscale x 1 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv2f16.p0(<vscale x 2 x half>, <vscale x 2 x half>*, <vscale x 2 x i1>, i32)
+declare void @llvm.vp.store.nxv2f16.p0(<vscale x 2 x half>, ptr, <vscale x 2 x i1>, i32)
 
-define void @vpstore_nxv2f16(<vscale x 2 x half> %val, <vscale x 2 x half>* %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+define void @vpstore_nxv2f16(<vscale x 2 x half> %val, ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv2f16.p0(<vscale x 2 x half> %val, <vscale x 2 x half>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv2f16.p0(<vscale x 2 x half> %val, ptr %ptr, <vscale x 2 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv4f16.p0(<vscale x 4 x half>, <vscale x 4 x half>*, <vscale x 4 x i1>, i32)
+declare void @llvm.vp.store.nxv4f16.p0(<vscale x 4 x half>, ptr, <vscale x 4 x i1>, i32)
 
-define void @vpstore_nxv4f16(<vscale x 4 x half> %val, <vscale x 4 x half>* %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+define void @vpstore_nxv4f16(<vscale x 4 x half> %val, ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv4f16.p0(<vscale x 4 x half> %val, <vscale x 4 x half>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv4f16.p0(<vscale x 4 x half> %val, ptr %ptr, <vscale x 4 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv8f16.p0(<vscale x 8 x half>, <vscale x 8 x half>*, <vscale x 8 x i1>, i32)
+declare void @llvm.vp.store.nxv8f16.p0(<vscale x 8 x half>, ptr, <vscale x 8 x i1>, i32)
 
-define void @vpstore_nxv8f16(<vscale x 8 x half> %val, <vscale x 8 x half>* %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+define void @vpstore_nxv8f16(<vscale x 8 x half> %val, ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv8f16.p0(<vscale x 8 x half> %val, <vscale x 8 x half>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv8f16.p0(<vscale x 8 x half> %val, ptr %ptr, <vscale x 8 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv1f32.p0(<vscale x 1 x float>, <vscale x 1 x float>*, <vscale x 1 x i1>, i32)
+declare void @llvm.vp.store.nxv1f32.p0(<vscale x 1 x float>, ptr, <vscale x 1 x i1>, i32)
 
-define void @vpstore_nxv1f32(<vscale x 1 x float> %val, <vscale x 1 x float>* %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+define void @vpstore_nxv1f32(<vscale x 1 x float> %val, ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv1f32.p0(<vscale x 1 x float> %val, <vscale x 1 x float>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv1f32.p0(<vscale x 1 x float> %val, ptr %ptr, <vscale x 1 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv2f32.p0(<vscale x 2 x float>, <vscale x 2 x float>*, <vscale x 2 x i1>, i32)
+declare void @llvm.vp.store.nxv2f32.p0(<vscale x 2 x float>, ptr, <vscale x 2 x i1>, i32)
 
-define void @vpstore_nxv2f32(<vscale x 2 x float> %val, <vscale x 2 x float>* %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+define void @vpstore_nxv2f32(<vscale x 2 x float> %val, ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv2f32.p0(<vscale x 2 x float> %val, <vscale x 2 x float>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv2f32.p0(<vscale x 2 x float> %val, ptr %ptr, <vscale x 2 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv4f32.p0(<vscale x 4 x float>, <vscale x 4 x float>*, <vscale x 4 x i1>, i32)
+declare void @llvm.vp.store.nxv4f32.p0(<vscale x 4 x float>, ptr, <vscale x 4 x i1>, i32)
 
-define void @vpstore_nxv4f32(<vscale x 4 x float> %val, <vscale x 4 x float>* %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+define void @vpstore_nxv4f32(<vscale x 4 x float> %val, ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv4f32.p0(<vscale x 4 x float> %val, <vscale x 4 x float>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv4f32.p0(<vscale x 4 x float> %val, ptr %ptr, <vscale x 4 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv8f32.p0(<vscale x 8 x float>, <vscale x 8 x float>*, <vscale x 8 x i1>, i32)
+declare void @llvm.vp.store.nxv8f32.p0(<vscale x 8 x float>, ptr, <vscale x 8 x i1>, i32)
 
-define void @vpstore_nxv8f32(<vscale x 8 x float> %val, <vscale x 8 x float>* %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+define void @vpstore_nxv8f32(<vscale x 8 x float> %val, ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv8f32.p0(<vscale x 8 x float> %val, <vscale x 8 x float>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv8f32.p0(<vscale x 8 x float> %val, ptr %ptr, <vscale x 8 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv1f64.p0(<vscale x 1 x double>, <vscale x 1 x double>*, <vscale x 1 x i1>, i32)
+declare void @llvm.vp.store.nxv1f64.p0(<vscale x 1 x double>, ptr, <vscale x 1 x i1>, i32)
 
-define void @vpstore_nxv1f64(<vscale x 1 x double> %val, <vscale x 1 x double>* %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+define void @vpstore_nxv1f64(<vscale x 1 x double> %val, ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv1f64.p0(<vscale x 1 x double> %val, <vscale x 1 x double>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv1f64.p0(<vscale x 1 x double> %val, ptr %ptr, <vscale x 1 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv2f64.p0(<vscale x 2 x double>, <vscale x 2 x double>*, <vscale x 2 x i1>, i32)
+declare void @llvm.vp.store.nxv2f64.p0(<vscale x 2 x double>, ptr, <vscale x 2 x i1>, i32)
 
-define void @vpstore_nxv2f64(<vscale x 2 x double> %val, <vscale x 2 x double>* %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+define void @vpstore_nxv2f64(<vscale x 2 x double> %val, ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv2f64.p0(<vscale x 2 x double> %val, <vscale x 2 x double>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv2f64.p0(<vscale x 2 x double> %val, ptr %ptr, <vscale x 2 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv4f64.p0(<vscale x 4 x double>, <vscale x 4 x double>*, <vscale x 4 x i1>, i32)
+declare void @llvm.vp.store.nxv4f64.p0(<vscale x 4 x double>, ptr, <vscale x 4 x i1>, i32)
 
-define void @vpstore_nxv4f64(<vscale x 4 x double> %val, <vscale x 4 x double>* %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+define void @vpstore_nxv4f64(<vscale x 4 x double> %val, ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv4f64.p0(<vscale x 4 x double> %val, <vscale x 4 x double>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv4f64.p0(<vscale x 4 x double> %val, ptr %ptr, <vscale x 4 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv8f64.p0(<vscale x 8 x double>, <vscale x 8 x double>*, <vscale x 8 x i1>, i32)
+declare void @llvm.vp.store.nxv8f64.p0(<vscale x 8 x double>, ptr, <vscale x 8 x i1>, i32)
 
-define void @vpstore_nxv8f64(<vscale x 8 x double> %val, <vscale x 8 x double>* %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+define void @vpstore_nxv8f64(<vscale x 8 x double> %val, ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv8f64.p0(<vscale x 8 x double> %val, <vscale x 8 x double>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv8f64.p0(<vscale x 8 x double> %val, ptr %ptr, <vscale x 8 x i1> %m, i32 %evl)
   ret void
 }
 
-define void @vpstore_nxv1i8_allones_mask(<vscale x 1 x i8> %val, <vscale x 1 x i8>* %ptr, i32 zeroext %evl) {
+define void @vpstore_nxv1i8_allones_mask(<vscale x 1 x i8> %val, ptr %ptr, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv1i8_allones_mask:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -360,13 +360,13 @@ define void @vpstore_nxv1i8_allones_mask(<vscale x 1 x i8> %val, <vscale x 1 x i
 ; CHECK-NEXT:    ret
   %a = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
   %b = shufflevector <vscale x 1 x i1> %a, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  call void @llvm.vp.store.nxv1i8.p0(<vscale x 1 x i8> %val, <vscale x 1 x i8>* %ptr, <vscale x 1 x i1> %b, i32 %evl)
+  call void @llvm.vp.store.nxv1i8.p0(<vscale x 1 x i8> %val, ptr %ptr, <vscale x 1 x i1> %b, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv16f64.p0(<vscale x 16 x double>, <vscale x 16 x double>*, <vscale x 16 x i1>, i32)
+declare void @llvm.vp.store.nxv16f64.p0(<vscale x 16 x double>, ptr, <vscale x 16 x i1>, i32)
 
-define void @vpstore_nxv16f64(<vscale x 16 x double> %val, <vscale x 16 x double>* %ptr, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+define void @vpstore_nxv16f64(<vscale x 16 x double> %val, ptr %ptr, <vscale x 16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv16f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a2, vlenb
@@ -389,15 +389,15 @@ define void @vpstore_nxv16f64(<vscale x 16 x double> %val, <vscale x 16 x double
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vse64.v v16, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv16f64.p0(<vscale x 16 x double> %val, <vscale x 16 x double>* %ptr, <vscale x 16 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv16f64.p0(<vscale x 16 x double> %val, ptr %ptr, <vscale x 16 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv17f64.p0(<vscale x 17 x double>, <vscale x 17 x double>*, <vscale x 17 x i1>, i32)
+declare void @llvm.vp.store.nxv17f64.p0(<vscale x 17 x double>, ptr, <vscale x 17 x i1>, i32)
 
 ; Widen to nxv32f64 then split into 4 x nxv8f64, of which 1 is empty.
 
-define void @vpstore_nxv17f64(<vscale x 17 x double> %val, <vscale x 17 x double>* %ptr, <vscale x 17 x i1> %m, i32 zeroext %evl) {
+define void @vpstore_nxv17f64(<vscale x 17 x double> %val, ptr %ptr, <vscale x 17 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv17f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a3, vlenb
@@ -458,6 +458,6 @@ define void @vpstore_nxv17f64(<vscale x 17 x double> %val, <vscale x 17 x double
 ; CHECK-NEXT:    add sp, sp, a0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv17f64.p0(<vscale x 17 x double> %val, <vscale x 17 x double>* %ptr, <vscale x 17 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv17f64.p0(<vscale x 17 x double> %val, ptr %ptr, <vscale x 17 x i1> %m, i32 %evl)
   ret void
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vse.ll b/llvm/test/CodeGen/RISCV/rvv/vse.ll
index 9bf06f19aa322..595ce888682a6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vse.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vse.ll
@@ -6,10 +6,10 @@
 
 declare void @llvm.riscv.vse.nxv1i64(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   iXLen);
 
-define void @intrinsic_vse_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, iXLen %2) nounwind {
+define void @intrinsic_vse_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -18,7 +18,7 @@ define void @intrinsic_vse_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1
 entry:
   call void @llvm.riscv.vse.nxv1i64(
     <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
+    ptr %1,
     iXLen %2)
 
   ret void
@@ -26,11 +26,11 @@ entry:
 
 declare void @llvm.riscv.vse.mask.nxv1i64(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -39,7 +39,7 @@ define void @intrinsic_vse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale
 entry:
   call void @llvm.riscv.vse.mask.nxv1i64(
     <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
+    ptr %1,
     <vscale x 1 x i1> %2,
     iXLen %3)
 
@@ -48,10 +48,10 @@ entry:
 
 declare void @llvm.riscv.vse.nxv2i64(
   <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
+  ptr,
   iXLen);
 
-define void @intrinsic_vse_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, iXLen %2) nounwind {
+define void @intrinsic_vse_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -60,7 +60,7 @@ define void @intrinsic_vse_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2
 entry:
   call void @llvm.riscv.vse.nxv2i64(
     <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
+    ptr %1,
     iXLen %2)
 
   ret void
@@ -68,11 +68,11 @@ entry:
 
 declare void @llvm.riscv.vse.mask.nxv2i64(
   <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
+  ptr,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vse_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -81,7 +81,7 @@ define void @intrinsic_vse_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale
 entry:
   call void @llvm.riscv.vse.mask.nxv2i64(
     <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
+    ptr %1,
     <vscale x 2 x i1> %2,
     iXLen %3)
 
@@ -90,10 +90,10 @@ entry:
 
 declare void @llvm.riscv.vse.nxv4i64(
   <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
+  ptr,
   iXLen);
 
-define void @intrinsic_vse_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, iXLen %2) nounwind {
+define void @intrinsic_vse_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -102,7 +102,7 @@ define void @intrinsic_vse_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4
 entry:
   call void @llvm.riscv.vse.nxv4i64(
     <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
+    ptr %1,
     iXLen %2)
 
   ret void
@@ -110,11 +110,11 @@ entry:
 
 declare void @llvm.riscv.vse.mask.nxv4i64(
   <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
+  ptr,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vse_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -123,7 +123,7 @@ define void @intrinsic_vse_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale
 entry:
   call void @llvm.riscv.vse.mask.nxv4i64(
     <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
+    ptr %1,
     <vscale x 4 x i1> %2,
     iXLen %3)
 
@@ -132,10 +132,10 @@ entry:
 
 declare void @llvm.riscv.vse.nxv8i64(
   <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
+  ptr,
   iXLen);
 
-define void @intrinsic_vse_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, iXLen %2) nounwind {
+define void @intrinsic_vse_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -144,7 +144,7 @@ define void @intrinsic_vse_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8
 entry:
   call void @llvm.riscv.vse.nxv8i64(
     <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
+    ptr %1,
     iXLen %2)
 
   ret void
@@ -152,11 +152,11 @@ entry:
 
 declare void @llvm.riscv.vse.mask.nxv8i64(
   <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
+  ptr,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vse_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -165,7 +165,7 @@ define void @intrinsic_vse_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale
 entry:
   call void @llvm.riscv.vse.mask.nxv8i64(
     <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
+    ptr %1,
     <vscale x 8 x i1> %2,
     iXLen %3)
 
@@ -174,10 +174,10 @@ entry:
 
 declare void @llvm.riscv.vse.nxv1f64(
   <vscale x 1 x double>,
-  <vscale x 1 x double>*,
+  ptr,
   iXLen);
 
-define void @intrinsic_vse_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, iXLen %2) nounwind {
+define void @intrinsic_vse_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -186,7 +186,7 @@ define void @intrinsic_vse_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x
 entry:
   call void @llvm.riscv.vse.nxv1f64(
     <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
+    ptr %1,
     iXLen %2)
 
   ret void
@@ -194,11 +194,11 @@ entry:
 
 declare void @llvm.riscv.vse.mask.nxv1f64(
   <vscale x 1 x double>,
-  <vscale x 1 x double>*,
+  ptr,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vse_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -207,7 +207,7 @@ define void @intrinsic_vse_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vsc
 entry:
   call void @llvm.riscv.vse.mask.nxv1f64(
     <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
+    ptr %1,
     <vscale x 1 x i1> %2,
     iXLen %3)
 
@@ -216,10 +216,10 @@ entry:
 
 declare void @llvm.riscv.vse.nxv2f64(
   <vscale x 2 x double>,
-  <vscale x 2 x double>*,
+  ptr,
   iXLen);
 
-define void @intrinsic_vse_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, iXLen %2) nounwind {
+define void @intrinsic_vse_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -228,7 +228,7 @@ define void @intrinsic_vse_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x
 entry:
   call void @llvm.riscv.vse.nxv2f64(
     <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
+    ptr %1,
     iXLen %2)
 
   ret void
@@ -236,11 +236,11 @@ entry:
 
 declare void @llvm.riscv.vse.mask.nxv2f64(
   <vscale x 2 x double>,
-  <vscale x 2 x double>*,
+  ptr,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vse_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -249,7 +249,7 @@ define void @intrinsic_vse_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vsc
 entry:
   call void @llvm.riscv.vse.mask.nxv2f64(
     <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
+    ptr %1,
     <vscale x 2 x i1> %2,
     iXLen %3)
 
@@ -258,10 +258,10 @@ entry:
 
 declare void @llvm.riscv.vse.nxv4f64(
   <vscale x 4 x double>,
-  <vscale x 4 x double>*,
+  ptr,
   iXLen);
 
-define void @intrinsic_vse_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, iXLen %2) nounwind {
+define void @intrinsic_vse_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -270,7 +270,7 @@ define void @intrinsic_vse_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x
 entry:
   call void @llvm.riscv.vse.nxv4f64(
     <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
+    ptr %1,
     iXLen %2)
 
   ret void
@@ -278,11 +278,11 @@ entry:
 
 declare void @llvm.riscv.vse.mask.nxv4f64(
   <vscale x 4 x double>,
-  <vscale x 4 x double>*,
+  ptr,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vse_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -291,7 +291,7 @@ define void @intrinsic_vse_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vsc
 entry:
   call void @llvm.riscv.vse.mask.nxv4f64(
     <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
+    ptr %1,
     <vscale x 4 x i1> %2,
     iXLen %3)
 
@@ -300,10 +300,10 @@ entry:
 
 declare void @llvm.riscv.vse.nxv8f64(
   <vscale x 8 x double>,
-  <vscale x 8 x double>*,
+  ptr,
   iXLen);
 
-define void @intrinsic_vse_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, iXLen %2) nounwind {
+define void @intrinsic_vse_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -312,7 +312,7 @@ define void @intrinsic_vse_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x
 entry:
   call void @llvm.riscv.vse.nxv8f64(
     <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
+    ptr %1,
     iXLen %2)
 
   ret void
@@ -320,11 +320,11 @@ entry:
 
 declare void @llvm.riscv.vse.mask.nxv8f64(
   <vscale x 8 x double>,
-  <vscale x 8 x double>*,
+  ptr,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vse_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -333,7 +333,7 @@ define void @intrinsic_vse_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vsc
 entry:
   call void @llvm.riscv.vse.mask.nxv8f64(
     <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
+    ptr %1,
     <vscale x 8 x i1> %2,
     iXLen %3)
 
@@ -342,10 +342,10 @@ entry:
 
 declare void @llvm.riscv.vse.nxv1i32(
   <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
+  ptr,
   iXLen);
 
-define void @intrinsic_vse_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, iXLen %2) nounwind {
+define void @intrinsic_vse_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -354,7 +354,7 @@ define void @intrinsic_vse_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1
 entry:
   call void @llvm.riscv.vse.nxv1i32(
     <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
+    ptr %1,
     iXLen %2)
 
   ret void
@@ -362,11 +362,11 @@ entry:
 
 declare void @llvm.riscv.vse.mask.nxv1i32(
   <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
+  ptr,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vse_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -375,7 +375,7 @@ define void @intrinsic_vse_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale
 entry:
   call void @llvm.riscv.vse.mask.nxv1i32(
     <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
+    ptr %1,
     <vscale x 1 x i1> %2,
     iXLen %3)
 
@@ -384,10 +384,10 @@ entry:
 
 declare void @llvm.riscv.vse.nxv2i32(
   <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
+  ptr,
   iXLen);
 
-define void @intrinsic_vse_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, iXLen %2) nounwind {
+define void @intrinsic_vse_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -396,7 +396,7 @@ define void @intrinsic_vse_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2
 entry:
   call void @llvm.riscv.vse.nxv2i32(
     <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
+    ptr %1,
     iXLen %2)
 
   ret void
@@ -404,11 +404,11 @@ entry:
 
 declare void @llvm.riscv.vse.mask.nxv2i32(
   <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
+  ptr,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vse_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -417,7 +417,7 @@ define void @intrinsic_vse_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale
 entry:
   call void @llvm.riscv.vse.mask.nxv2i32(
     <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
+    ptr %1,
     <vscale x 2 x i1> %2,
     iXLen %3)
 
@@ -426,10 +426,10 @@ entry:
 
 declare void @llvm.riscv.vse.nxv4i32(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   iXLen);
 
-define void @intrinsic_vse_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, iXLen %2) nounwind {
+define void @intrinsic_vse_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -438,7 +438,7 @@ define void @intrinsic_vse_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4
 entry:
   call void @llvm.riscv.vse.nxv4i32(
     <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
+    ptr %1,
     iXLen %2)
 
   ret void
@@ -446,11 +446,11 @@ entry:
 
 declare void @llvm.riscv.vse.mask.nxv4i32(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vse_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -459,7 +459,7 @@ define void @intrinsic_vse_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale
 entry:
   call void @llvm.riscv.vse.mask.nxv4i32(
     <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
+    ptr %1,
     <vscale x 4 x i1> %2,
     iXLen %3)
 
@@ -468,10 +468,10 @@ entry:
 
 declare void @llvm.riscv.vse.nxv8i32(
   <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
+  ptr,
   iXLen);
 
-define void @intrinsic_vse_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, iXLen %2) nounwind {
+define void @intrinsic_vse_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -480,7 +480,7 @@ define void @intrinsic_vse_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8
 entry:
   call void @llvm.riscv.vse.nxv8i32(
     <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
+    ptr %1,
     iXLen %2)
 
   ret void
@@ -488,11 +488,11 @@ entry:
 
 declare void @llvm.riscv.vse.mask.nxv8i32(
   <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
+  ptr,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vse_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -501,7 +501,7 @@ define void @intrinsic_vse_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale
 entry:
   call void @llvm.riscv.vse.mask.nxv8i32(
     <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
+    ptr %1,
     <vscale x 8 x i1> %2,
     iXLen %3)
 
@@ -510,10 +510,10 @@ entry:
 
 declare void @llvm.riscv.vse.nxv16i32(
   <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
+  ptr,
   iXLen);
 
-define void @intrinsic_vse_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, iXLen %2) nounwind {
+define void @intrinsic_vse_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -522,7 +522,7 @@ define void @intrinsic_vse_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x
 entry:
   call void @llvm.riscv.vse.nxv16i32(
     <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
+    ptr %1,
     iXLen %2)
 
   ret void
@@ -530,11 +530,11 @@ entry:
 
 declare void @llvm.riscv.vse.mask.nxv16i32(
   <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
+  ptr,
   <vscale x 16 x i1>,
   iXLen);
 
-define void @intrinsic_vse_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -543,7 +543,7 @@ define void @intrinsic_vse_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vsc
 entry:
   call void @llvm.riscv.vse.mask.nxv16i32(
     <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
+    ptr %1,
     <vscale x 16 x i1> %2,
     iXLen %3)
 
@@ -552,10 +552,10 @@ entry:
 
 declare void @llvm.riscv.vse.nxv1f32(
   <vscale x 1 x float>,
-  <vscale x 1 x float>*,
+  ptr,
   iXLen);
 
-define void @intrinsic_vse_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, iXLen %2) nounwind {
+define void @intrinsic_vse_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -564,7 +564,7 @@ define void @intrinsic_vse_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x
 entry:
   call void @llvm.riscv.vse.nxv1f32(
     <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
+    ptr %1,
     iXLen %2)
 
   ret void
@@ -572,11 +572,11 @@ entry:
 
 declare void @llvm.riscv.vse.mask.nxv1f32(
   <vscale x 1 x float>,
-  <vscale x 1 x float>*,
+  ptr,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vse_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -585,7 +585,7 @@ define void @intrinsic_vse_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vsca
 entry:
   call void @llvm.riscv.vse.mask.nxv1f32(
     <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
+    ptr %1,
     <vscale x 1 x i1> %2,
     iXLen %3)
 
@@ -594,10 +594,10 @@ entry:
 
 declare void @llvm.riscv.vse.nxv2f32(
   <vscale x 2 x float>,
-  <vscale x 2 x float>*,
+  ptr,
   iXLen);
 
-define void @intrinsic_vse_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, iXLen %2) nounwind {
+define void @intrinsic_vse_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -606,7 +606,7 @@ define void @intrinsic_vse_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x
 entry:
   call void @llvm.riscv.vse.nxv2f32(
     <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
+    ptr %1,
     iXLen %2)
 
   ret void
@@ -614,11 +614,11 @@ entry:
 
 declare void @llvm.riscv.vse.mask.nxv2f32(
   <vscale x 2 x float>,
-  <vscale x 2 x float>*,
+  ptr,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vse_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -627,7 +627,7 @@ define void @intrinsic_vse_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vsca
 entry:
   call void @llvm.riscv.vse.mask.nxv2f32(
     <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
+    ptr %1,
     <vscale x 2 x i1> %2,
     iXLen %3)
 
@@ -636,10 +636,10 @@ entry:
 
 declare void @llvm.riscv.vse.nxv4f32(
   <vscale x 4 x float>,
-  <vscale x 4 x float>*,
+  ptr,
   iXLen);
 
-define void @intrinsic_vse_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, iXLen %2) nounwind {
+define void @intrinsic_vse_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -648,7 +648,7 @@ define void @intrinsic_vse_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x
 entry:
   call void @llvm.riscv.vse.nxv4f32(
     <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
+    ptr %1,
     iXLen %2)
 
   ret void
@@ -656,11 +656,11 @@ entry:
 
 declare void @llvm.riscv.vse.mask.nxv4f32(
   <vscale x 4 x float>,
-  <vscale x 4 x float>*,
+  ptr,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vse_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -669,7 +669,7 @@ define void @intrinsic_vse_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vsca
 entry:
   call void @llvm.riscv.vse.mask.nxv4f32(
     <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
+    ptr %1,
     <vscale x 4 x i1> %2,
     iXLen %3)
 
@@ -678,10 +678,10 @@ entry:
 
 declare void @llvm.riscv.vse.nxv8f32(
   <vscale x 8 x float>,
-  <vscale x 8 x float>*,
+  ptr,
   iXLen);
 
-define void @intrinsic_vse_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, iXLen %2) nounwind {
+define void @intrinsic_vse_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -690,7 +690,7 @@ define void @intrinsic_vse_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x
 entry:
   call void @llvm.riscv.vse.nxv8f32(
     <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
+    ptr %1,
     iXLen %2)
 
   ret void
@@ -698,11 +698,11 @@ entry:
 
 declare void @llvm.riscv.vse.mask.nxv8f32(
   <vscale x 8 x float>,
-  <vscale x 8 x float>*,
+  ptr,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vse_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -711,7 +711,7 @@ define void @intrinsic_vse_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vsca
 entry:
   call void @llvm.riscv.vse.mask.nxv8f32(
     <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
+    ptr %1,
     <vscale x 8 x i1> %2,
     iXLen %3)
 
@@ -720,10 +720,10 @@ entry:
 
 declare void @llvm.riscv.vse.nxv16f32(
   <vscale x 16 x float>,
-  <vscale x 16 x float>*,
+  ptr,
   iXLen);
 
-define void @intrinsic_vse_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, iXLen %2) nounwind {
+define void @intrinsic_vse_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -732,7 +732,7 @@ define void @intrinsic_vse_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale
 entry:
   call void @llvm.riscv.vse.nxv16f32(
     <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
+    ptr %1,
     iXLen %2)
 
   ret void
@@ -740,11 +740,11 @@ entry:
 
 declare void @llvm.riscv.vse.mask.nxv16f32(
   <vscale x 16 x float>,
-  <vscale x 16 x float>*,
+  ptr,
   <vscale x 16 x i1>,
   iXLen);
 
-define void @intrinsic_vse_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -753,7 +753,7 @@ define void @intrinsic_vse_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <v
 entry:
   call void @llvm.riscv.vse.mask.nxv16f32(
     <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
+    ptr %1,
     <vscale x 16 x i1> %2,
     iXLen %3)
 
@@ -762,10 +762,10 @@ entry:
 
 declare void @llvm.riscv.vse.nxv1i16(
   <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
+  ptr,
   iXLen);
 
-define void @intrinsic_vse_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, iXLen %2) nounwind {
+define void @intrinsic_vse_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -774,7 +774,7 @@ define void @intrinsic_vse_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1
 entry:
   call void @llvm.riscv.vse.nxv1i16(
     <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
+    ptr %1,
     iXLen %2)
 
   ret void
@@ -782,11 +782,11 @@ entry:
 
 declare void @llvm.riscv.vse.mask.nxv1i16(
   <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
+  ptr,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vse_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -795,7 +795,7 @@ define void @intrinsic_vse_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale
 entry:
   call void @llvm.riscv.vse.mask.nxv1i16(
     <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
+    ptr %1,
     <vscale x 1 x i1> %2,
     iXLen %3)
 
@@ -804,10 +804,10 @@ entry:
 
 declare void @llvm.riscv.vse.nxv2i16(
   <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
+  ptr,
   iXLen);
 
-define void @intrinsic_vse_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, iXLen %2) nounwind {
+define void @intrinsic_vse_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -816,7 +816,7 @@ define void @intrinsic_vse_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2
 entry:
   call void @llvm.riscv.vse.nxv2i16(
     <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
+    ptr %1,
     iXLen %2)
 
   ret void
@@ -824,11 +824,11 @@ entry:
 
 declare void @llvm.riscv.vse.mask.nxv2i16(
   <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
+  ptr,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vse_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -837,7 +837,7 @@ define void @intrinsic_vse_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale
 entry:
   call void @llvm.riscv.vse.mask.nxv2i16(
     <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
+    ptr %1,
     <vscale x 2 x i1> %2,
     iXLen %3)
 
@@ -846,10 +846,10 @@ entry:
 
 declare void @llvm.riscv.vse.nxv4i16(
   <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
+  ptr,
   iXLen);
 
-define void @intrinsic_vse_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, iXLen %2) nounwind {
+define void @intrinsic_vse_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -858,7 +858,7 @@ define void @intrinsic_vse_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4
 entry:
   call void @llvm.riscv.vse.nxv4i16(
     <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
+    ptr %1,
     iXLen %2)
 
   ret void
@@ -866,11 +866,11 @@ entry:
 
 declare void @llvm.riscv.vse.mask.nxv4i16(
   <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
+  ptr,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vse_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -879,7 +879,7 @@ define void @intrinsic_vse_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale
 entry:
   call void @llvm.riscv.vse.mask.nxv4i16(
     <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
+    ptr %1,
     <vscale x 4 x i1> %2,
     iXLen %3)
 
@@ -888,10 +888,10 @@ entry:
 
 declare void @llvm.riscv.vse.nxv8i16(
   <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
+  ptr,
   iXLen);
 
-define void @intrinsic_vse_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, iXLen %2) nounwind {
+define void @intrinsic_vse_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -900,7 +900,7 @@ define void @intrinsic_vse_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8
 entry:
   call void @llvm.riscv.vse.nxv8i16(
     <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
+    ptr %1,
     iXLen %2)
 
   ret void
@@ -908,11 +908,11 @@ entry:
 
 declare void @llvm.riscv.vse.mask.nxv8i16(
   <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
+  ptr,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vse_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -921,7 +921,7 @@ define void @intrinsic_vse_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale
 entry:
   call void @llvm.riscv.vse.mask.nxv8i16(
     <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
+    ptr %1,
     <vscale x 8 x i1> %2,
     iXLen %3)
 
@@ -930,10 +930,10 @@ entry:
 
 declare void @llvm.riscv.vse.nxv16i16(
   <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
+  ptr,
   iXLen);
 
-define void @intrinsic_vse_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, iXLen %2) nounwind {
+define void @intrinsic_vse_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -942,7 +942,7 @@ define void @intrinsic_vse_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x
 entry:
   call void @llvm.riscv.vse.nxv16i16(
     <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
+    ptr %1,
     iXLen %2)
 
   ret void
@@ -950,11 +950,11 @@ entry:
 
 declare void @llvm.riscv.vse.mask.nxv16i16(
   <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
+  ptr,
   <vscale x 16 x i1>,
   iXLen);
 
-define void @intrinsic_vse_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -963,7 +963,7 @@ define void @intrinsic_vse_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vsc
 entry:
   call void @llvm.riscv.vse.mask.nxv16i16(
     <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
+    ptr %1,
     <vscale x 16 x i1> %2,
     iXLen %3)
 
@@ -972,10 +972,10 @@ entry:
 
 declare void @llvm.riscv.vse.nxv32i16(
   <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
+  ptr,
   iXLen);
 
-define void @intrinsic_vse_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, iXLen %2) nounwind {
+define void @intrinsic_vse_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -984,7 +984,7 @@ define void @intrinsic_vse_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x
 entry:
   call void @llvm.riscv.vse.nxv32i16(
     <vscale x 32 x i16> %0,
-    <vscale x 32 x i16>* %1,
+    ptr %1,
     iXLen %2)
 
   ret void
@@ -992,11 +992,11 @@ entry:
 
 declare void @llvm.riscv.vse.mask.nxv32i16(
   <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
+  ptr,
   <vscale x 32 x i1>,
   iXLen);
 
-define void @intrinsic_vse_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -1005,7 +1005,7 @@ define void @intrinsic_vse_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vsc
 entry:
   call void @llvm.riscv.vse.mask.nxv32i16(
     <vscale x 32 x i16> %0,
-    <vscale x 32 x i16>* %1,
+    ptr %1,
     <vscale x 32 x i1> %2,
     iXLen %3)
 
@@ -1014,10 +1014,10 @@ entry:
 
 declare void @llvm.riscv.vse.nxv1f16(
   <vscale x 1 x half>,
-  <vscale x 1 x half>*,
+  ptr,
   iXLen);
 
-define void @intrinsic_vse_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, iXLen %2) nounwind {
+define void @intrinsic_vse_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1026,7 +1026,7 @@ define void @intrinsic_vse_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1
 entry:
   call void @llvm.riscv.vse.nxv1f16(
     <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
+    ptr %1,
     iXLen %2)
 
   ret void
@@ -1034,11 +1034,11 @@ entry:
 
 declare void @llvm.riscv.vse.mask.nxv1f16(
   <vscale x 1 x half>,
-  <vscale x 1 x half>*,
+  ptr,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vse_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1047,7 +1047,7 @@ define void @intrinsic_vse_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscal
 entry:
   call void @llvm.riscv.vse.mask.nxv1f16(
     <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
+    ptr %1,
     <vscale x 1 x i1> %2,
     iXLen %3)
 
@@ -1056,10 +1056,10 @@ entry:
 
 declare void @llvm.riscv.vse.nxv2f16(
   <vscale x 2 x half>,
-  <vscale x 2 x half>*,
+  ptr,
   iXLen);
 
-define void @intrinsic_vse_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, iXLen %2) nounwind {
+define void @intrinsic_vse_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1068,7 +1068,7 @@ define void @intrinsic_vse_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2
 entry:
   call void @llvm.riscv.vse.nxv2f16(
     <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
+    ptr %1,
     iXLen %2)
 
   ret void
@@ -1076,11 +1076,11 @@ entry:
 
 declare void @llvm.riscv.vse.mask.nxv2f16(
   <vscale x 2 x half>,
-  <vscale x 2 x half>*,
+  ptr,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vse_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1089,7 +1089,7 @@ define void @intrinsic_vse_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscal
 entry:
   call void @llvm.riscv.vse.mask.nxv2f16(
     <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
+    ptr %1,
     <vscale x 2 x i1> %2,
     iXLen %3)
 
@@ -1098,10 +1098,10 @@ entry:
 
 declare void @llvm.riscv.vse.nxv4f16(
   <vscale x 4 x half>,
-  <vscale x 4 x half>*,
+  ptr,
   iXLen);
 
-define void @intrinsic_vse_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, iXLen %2) nounwind {
+define void @intrinsic_vse_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1110,7 +1110,7 @@ define void @intrinsic_vse_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4
 entry:
   call void @llvm.riscv.vse.nxv4f16(
     <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
+    ptr %1,
     iXLen %2)
 
   ret void
@@ -1118,11 +1118,11 @@ entry:
 
 declare void @llvm.riscv.vse.mask.nxv4f16(
   <vscale x 4 x half>,
-  <vscale x 4 x half>*,
+  ptr,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vse_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1131,7 +1131,7 @@ define void @intrinsic_vse_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscal
 entry:
   call void @llvm.riscv.vse.mask.nxv4f16(
     <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
+    ptr %1,
     <vscale x 4 x i1> %2,
     iXLen %3)
 
@@ -1140,10 +1140,10 @@ entry:
 
 declare void @llvm.riscv.vse.nxv8f16(
   <vscale x 8 x half>,
-  <vscale x 8 x half>*,
+  ptr,
   iXLen);
 
-define void @intrinsic_vse_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, iXLen %2) nounwind {
+define void @intrinsic_vse_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1152,7 +1152,7 @@ define void @intrinsic_vse_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8
 entry:
   call void @llvm.riscv.vse.nxv8f16(
     <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
+    ptr %1,
     iXLen %2)
 
   ret void
@@ -1160,11 +1160,11 @@ entry:
 
 declare void @llvm.riscv.vse.mask.nxv8f16(
   <vscale x 8 x half>,
-  <vscale x 8 x half>*,
+  ptr,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vse_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1173,7 +1173,7 @@ define void @intrinsic_vse_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscal
 entry:
   call void @llvm.riscv.vse.mask.nxv8f16(
     <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
+    ptr %1,
     <vscale x 8 x i1> %2,
     iXLen %3)
 
@@ -1182,10 +1182,10 @@ entry:
 
 declare void @llvm.riscv.vse.nxv16f16(
   <vscale x 16 x half>,
-  <vscale x 16 x half>*,
+  ptr,
   iXLen);
 
-define void @intrinsic_vse_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, iXLen %2) nounwind {
+define void @intrinsic_vse_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1194,7 +1194,7 @@ define void @intrinsic_vse_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale
 entry:
   call void @llvm.riscv.vse.nxv16f16(
     <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
+    ptr %1,
     iXLen %2)
 
   ret void
@@ -1202,11 +1202,11 @@ entry:
 
 declare void @llvm.riscv.vse.mask.nxv16f16(
   <vscale x 16 x half>,
-  <vscale x 16 x half>*,
+  ptr,
   <vscale x 16 x i1>,
   iXLen);
 
-define void @intrinsic_vse_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1215,7 +1215,7 @@ define void @intrinsic_vse_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vs
 entry:
   call void @llvm.riscv.vse.mask.nxv16f16(
     <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
+    ptr %1,
     <vscale x 16 x i1> %2,
     iXLen %3)
 
@@ -1224,10 +1224,10 @@ entry:
 
 declare void @llvm.riscv.vse.nxv32f16(
   <vscale x 32 x half>,
-  <vscale x 32 x half>*,
+  ptr,
   iXLen);
 
-define void @intrinsic_vse_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, iXLen %2) nounwind {
+define void @intrinsic_vse_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -1236,7 +1236,7 @@ define void @intrinsic_vse_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale
 entry:
   call void @llvm.riscv.vse.nxv32f16(
     <vscale x 32 x half> %0,
-    <vscale x 32 x half>* %1,
+    ptr %1,
     iXLen %2)
 
   ret void
@@ -1244,11 +1244,11 @@ entry:
 
 declare void @llvm.riscv.vse.mask.nxv32f16(
   <vscale x 32 x half>,
-  <vscale x 32 x half>*,
+  ptr,
   <vscale x 32 x i1>,
   iXLen);
 
-define void @intrinsic_vse_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -1257,7 +1257,7 @@ define void @intrinsic_vse_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vs
 entry:
   call void @llvm.riscv.vse.mask.nxv32f16(
     <vscale x 32 x half> %0,
-    <vscale x 32 x half>* %1,
+    ptr %1,
     <vscale x 32 x i1> %2,
     iXLen %3)
 
@@ -1266,10 +1266,10 @@ entry:
 
 declare void @llvm.riscv.vse.nxv1bf16(
   <vscale x 1 x bfloat>,
-  <vscale x 1 x bfloat>*,
+  ptr,
   iXLen);
 
-define void @intrinsic_vse_v_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat>* %1, iXLen %2) nounwind {
+define void @intrinsic_vse_v_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv1bf16_nxv1bf16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1278,7 +1278,7 @@ define void @intrinsic_vse_v_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale
 entry:
   call void @llvm.riscv.vse.nxv1bf16(
     <vscale x 1 x bfloat> %0,
-    <vscale x 1 x bfloat>* %1,
+    ptr %1,
     iXLen %2)
 
   ret void
@@ -1286,11 +1286,11 @@ entry:
 
 declare void @llvm.riscv.vse.mask.nxv1bf16(
   <vscale x 1 x bfloat>,
-  <vscale x 1 x bfloat>*,
+  ptr,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vse_mask_v_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1bf16_nxv1bf16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1299,7 +1299,7 @@ define void @intrinsic_vse_mask_v_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <v
 entry:
   call void @llvm.riscv.vse.mask.nxv1bf16(
     <vscale x 1 x bfloat> %0,
-    <vscale x 1 x bfloat>* %1,
+    ptr %1,
     <vscale x 1 x i1> %2,
     iXLen %3)
 
@@ -1308,10 +1308,10 @@ entry:
 
 declare void @llvm.riscv.vse.nxv2bf16(
   <vscale x 2 x bfloat>,
-  <vscale x 2 x bfloat>*,
+  ptr,
   iXLen);
 
-define void @intrinsic_vse_v_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat>* %1, iXLen %2) nounwind {
+define void @intrinsic_vse_v_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv2bf16_nxv2bf16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1320,7 +1320,7 @@ define void @intrinsic_vse_v_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale
 entry:
   call void @llvm.riscv.vse.nxv2bf16(
     <vscale x 2 x bfloat> %0,
-    <vscale x 2 x bfloat>* %1,
+    ptr %1,
     iXLen %2)
 
   ret void
@@ -1328,11 +1328,11 @@ entry:
 
 declare void @llvm.riscv.vse.mask.nxv2bf16(
   <vscale x 2 x bfloat>,
-  <vscale x 2 x bfloat>*,
+  ptr,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vse_mask_v_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat>* %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2bf16_nxv2bf16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1341,7 +1341,7 @@ define void @intrinsic_vse_mask_v_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <v
 entry:
   call void @llvm.riscv.vse.mask.nxv2bf16(
     <vscale x 2 x bfloat> %0,
-    <vscale x 2 x bfloat>* %1,
+    ptr %1,
     <vscale x 2 x i1> %2,
     iXLen %3)
 
@@ -1350,10 +1350,10 @@ entry:
 
 declare void @llvm.riscv.vse.nxv4bf16(
   <vscale x 4 x bfloat>,
-  <vscale x 4 x bfloat>*,
+  ptr,
   iXLen);
 
-define void @intrinsic_vse_v_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat>* %1, iXLen %2) nounwind {
+define void @intrinsic_vse_v_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv4bf16_nxv4bf16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1362,7 +1362,7 @@ define void @intrinsic_vse_v_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale
 entry:
   call void @llvm.riscv.vse.nxv4bf16(
     <vscale x 4 x bfloat> %0,
-    <vscale x 4 x bfloat>* %1,
+    ptr %1,
     iXLen %2)
 
   ret void
@@ -1370,11 +1370,11 @@ entry:
 
 declare void @llvm.riscv.vse.mask.nxv4bf16(
   <vscale x 4 x bfloat>,
-  <vscale x 4 x bfloat>*,
+  ptr,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vse_mask_v_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat>* %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4bf16_nxv4bf16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1383,7 +1383,7 @@ define void @intrinsic_vse_mask_v_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <v
 entry:
   call void @llvm.riscv.vse.mask.nxv4bf16(
     <vscale x 4 x bfloat> %0,
-    <vscale x 4 x bfloat>* %1,
+    ptr %1,
     <vscale x 4 x i1> %2,
     iXLen %3)
 
@@ -1392,10 +1392,10 @@ entry:
 
 declare void @llvm.riscv.vse.nxv8bf16(
   <vscale x 8 x bfloat>,
-  <vscale x 8 x bfloat>*,
+  ptr,
   iXLen);
 
-define void @intrinsic_vse_v_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat>* %1, iXLen %2) nounwind {
+define void @intrinsic_vse_v_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv8bf16_nxv8bf16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1404,7 +1404,7 @@ define void @intrinsic_vse_v_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale
 entry:
   call void @llvm.riscv.vse.nxv8bf16(
     <vscale x 8 x bfloat> %0,
-    <vscale x 8 x bfloat>* %1,
+    ptr %1,
     iXLen %2)
 
   ret void
@@ -1412,11 +1412,11 @@ entry:
 
 declare void @llvm.riscv.vse.mask.nxv8bf16(
   <vscale x 8 x bfloat>,
-  <vscale x 8 x bfloat>*,
+  ptr,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vse_mask_v_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat>* %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8bf16_nxv8bf16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1425,7 +1425,7 @@ define void @intrinsic_vse_mask_v_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <v
 entry:
   call void @llvm.riscv.vse.mask.nxv8bf16(
     <vscale x 8 x bfloat> %0,
-    <vscale x 8 x bfloat>* %1,
+    ptr %1,
     <vscale x 8 x i1> %2,
     iXLen %3)
 
@@ -1434,10 +1434,10 @@ entry:
 
 declare void @llvm.riscv.vse.nxv16bf16(
   <vscale x 16 x bfloat>,
-  <vscale x 16 x bfloat>*,
+  ptr,
   iXLen);
 
-define void @intrinsic_vse_v_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat>* %1, iXLen %2) nounwind {
+define void @intrinsic_vse_v_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv16bf16_nxv16bf16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1446,7 +1446,7 @@ define void @intrinsic_vse_v_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vsc
 entry:
   call void @llvm.riscv.vse.nxv16bf16(
     <vscale x 16 x bfloat> %0,
-    <vscale x 16 x bfloat>* %1,
+    ptr %1,
     iXLen %2)
 
   ret void
@@ -1454,11 +1454,11 @@ entry:
 
 declare void @llvm.riscv.vse.mask.nxv16bf16(
   <vscale x 16 x bfloat>,
-  <vscale x 16 x bfloat>*,
+  ptr,
   <vscale x 16 x i1>,
   iXLen);
 
-define void @intrinsic_vse_mask_v_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat>* %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv16bf16_nxv16bf16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1467,7 +1467,7 @@ define void @intrinsic_vse_mask_v_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0,
 entry:
   call void @llvm.riscv.vse.mask.nxv16bf16(
     <vscale x 16 x bfloat> %0,
-    <vscale x 16 x bfloat>* %1,
+    ptr %1,
     <vscale x 16 x i1> %2,
     iXLen %3)
 
@@ -1476,10 +1476,10 @@ entry:
 
 declare void @llvm.riscv.vse.nxv32bf16(
   <vscale x 32 x bfloat>,
-  <vscale x 32 x bfloat>*,
+  ptr,
   iXLen);
 
-define void @intrinsic_vse_v_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat>* %1, iXLen %2) nounwind {
+define void @intrinsic_vse_v_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv32bf16_nxv32bf16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -1488,7 +1488,7 @@ define void @intrinsic_vse_v_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vsc
 entry:
   call void @llvm.riscv.vse.nxv32bf16(
     <vscale x 32 x bfloat> %0,
-    <vscale x 32 x bfloat>* %1,
+    ptr %1,
     iXLen %2)
 
   ret void
@@ -1496,11 +1496,11 @@ entry:
 
 declare void @llvm.riscv.vse.mask.nxv32bf16(
   <vscale x 32 x bfloat>,
-  <vscale x 32 x bfloat>*,
+  ptr,
   <vscale x 32 x i1>,
   iXLen);
 
-define void @intrinsic_vse_mask_v_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat>* %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, ptr %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv32bf16_nxv32bf16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -1509,7 +1509,7 @@ define void @intrinsic_vse_mask_v_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0,
 entry:
   call void @llvm.riscv.vse.mask.nxv32bf16(
     <vscale x 32 x bfloat> %0,
-    <vscale x 32 x bfloat>* %1,
+    ptr %1,
     <vscale x 32 x i1> %2,
     iXLen %3)
 
@@ -1518,10 +1518,10 @@ entry:
 
 declare void @llvm.riscv.vse.nxv1i8(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   iXLen);
 
-define void @intrinsic_vse_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, iXLen %2) nounwind {
+define void @intrinsic_vse_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -1530,7 +1530,7 @@ define void @intrinsic_vse_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i
 entry:
   call void @llvm.riscv.vse.nxv1i8(
     <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
+    ptr %1,
     iXLen %2)
 
   ret void
@@ -1538,11 +1538,11 @@ entry:
 
 declare void @llvm.riscv.vse.mask.nxv1i8(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vse_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -1551,7 +1551,7 @@ define void @intrinsic_vse_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x
 entry:
   call void @llvm.riscv.vse.mask.nxv1i8(
     <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
+    ptr %1,
     <vscale x 1 x i1> %2,
     iXLen %3)
 
@@ -1560,10 +1560,10 @@ entry:
 
 declare void @llvm.riscv.vse.nxv2i8(
   <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
+  ptr,
   iXLen);
 
-define void @intrinsic_vse_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, iXLen %2) nounwind {
+define void @intrinsic_vse_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -1572,7 +1572,7 @@ define void @intrinsic_vse_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i
 entry:
   call void @llvm.riscv.vse.nxv2i8(
     <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
+    ptr %1,
     iXLen %2)
 
   ret void
@@ -1580,11 +1580,11 @@ entry:
 
 declare void @llvm.riscv.vse.mask.nxv2i8(
   <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
+  ptr,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vse_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -1593,7 +1593,7 @@ define void @intrinsic_vse_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x
 entry:
   call void @llvm.riscv.vse.mask.nxv2i8(
     <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
+    ptr %1,
     <vscale x 2 x i1> %2,
     iXLen %3)
 
@@ -1602,10 +1602,10 @@ entry:
 
 declare void @llvm.riscv.vse.nxv4i8(
   <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
+  ptr,
   iXLen);
 
-define void @intrinsic_vse_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, iXLen %2) nounwind {
+define void @intrinsic_vse_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -1614,7 +1614,7 @@ define void @intrinsic_vse_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i
 entry:
   call void @llvm.riscv.vse.nxv4i8(
     <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
+    ptr %1,
     iXLen %2)
 
   ret void
@@ -1622,11 +1622,11 @@ entry:
 
 declare void @llvm.riscv.vse.mask.nxv4i8(
   <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
+  ptr,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vse_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -1635,7 +1635,7 @@ define void @intrinsic_vse_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x
 entry:
   call void @llvm.riscv.vse.mask.nxv4i8(
     <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
+    ptr %1,
     <vscale x 4 x i1> %2,
     iXLen %3)
 
@@ -1644,10 +1644,10 @@ entry:
 
 declare void @llvm.riscv.vse.nxv8i8(
   <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
+  ptr,
   iXLen);
 
-define void @intrinsic_vse_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, iXLen %2) nounwind {
+define void @intrinsic_vse_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -1656,7 +1656,7 @@ define void @intrinsic_vse_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i
 entry:
   call void @llvm.riscv.vse.nxv8i8(
     <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
+    ptr %1,
     iXLen %2)
 
   ret void
@@ -1664,11 +1664,11 @@ entry:
 
 declare void @llvm.riscv.vse.mask.nxv8i8(
   <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
+  ptr,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vse_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -1677,7 +1677,7 @@ define void @intrinsic_vse_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x
 entry:
   call void @llvm.riscv.vse.mask.nxv8i8(
     <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
+    ptr %1,
     <vscale x 8 x i1> %2,
     iXLen %3)
 
@@ -1686,10 +1686,10 @@ entry:
 
 declare void @llvm.riscv.vse.nxv16i8(
   <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
+  ptr,
   iXLen);
 
-define void @intrinsic_vse_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, iXLen %2) nounwind {
+define void @intrinsic_vse_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -1698,7 +1698,7 @@ define void @intrinsic_vse_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16
 entry:
   call void @llvm.riscv.vse.nxv16i8(
     <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
+    ptr %1,
     iXLen %2)
 
   ret void
@@ -1706,11 +1706,11 @@ entry:
 
 declare void @llvm.riscv.vse.mask.nxv16i8(
   <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
+  ptr,
   <vscale x 16 x i1>,
   iXLen);
 
-define void @intrinsic_vse_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -1719,7 +1719,7 @@ define void @intrinsic_vse_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale
 entry:
   call void @llvm.riscv.vse.mask.nxv16i8(
     <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
+    ptr %1,
     <vscale x 16 x i1> %2,
     iXLen %3)
 
@@ -1728,10 +1728,10 @@ entry:
 
 declare void @llvm.riscv.vse.nxv32i8(
   <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
+  ptr,
   iXLen);
 
-define void @intrinsic_vse_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, iXLen %2) nounwind {
+define void @intrinsic_vse_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -1740,7 +1740,7 @@ define void @intrinsic_vse_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32
 entry:
   call void @llvm.riscv.vse.nxv32i8(
     <vscale x 32 x i8> %0,
-    <vscale x 32 x i8>* %1,
+    ptr %1,
     iXLen %2)
 
   ret void
@@ -1748,11 +1748,11 @@ entry:
 
 declare void @llvm.riscv.vse.mask.nxv32i8(
   <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
+  ptr,
   <vscale x 32 x i1>,
   iXLen);
 
-define void @intrinsic_vse_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -1761,7 +1761,7 @@ define void @intrinsic_vse_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale
 entry:
   call void @llvm.riscv.vse.mask.nxv32i8(
     <vscale x 32 x i8> %0,
-    <vscale x 32 x i8>* %1,
+    ptr %1,
     <vscale x 32 x i1> %2,
     iXLen %3)
 
@@ -1770,10 +1770,10 @@ entry:
 
 declare void @llvm.riscv.vse.nxv64i8(
   <vscale x 64 x i8>,
-  <vscale x 64 x i8>*,
+  ptr,
   iXLen);
 
-define void @intrinsic_vse_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, iXLen %2) nounwind {
+define void @intrinsic_vse_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
@@ -1782,7 +1782,7 @@ define void @intrinsic_vse_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64
 entry:
   call void @llvm.riscv.vse.nxv64i8(
     <vscale x 64 x i8> %0,
-    <vscale x 64 x i8>* %1,
+    ptr %1,
     iXLen %2)
 
   ret void
@@ -1790,11 +1790,11 @@ entry:
 
 declare void @llvm.riscv.vse.mask.nxv64i8(
   <vscale x 64 x i8>,
-  <vscale x 64 x i8>*,
+  ptr,
   <vscale x 64 x i1>,
   iXLen);
 
-define void @intrinsic_vse_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
@@ -1803,7 +1803,7 @@ define void @intrinsic_vse_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale
 entry:
   call void @llvm.riscv.vse.mask.nxv64i8(
     <vscale x 64 x i8> %0,
-    <vscale x 64 x i8>* %1,
+    ptr %1,
     <vscale x 64 x i1> %2,
     iXLen %3)
 

diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll
index 59280e2ec2a8a..3faceb0aa6b61 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll
@@ -495,7 +495,7 @@ define <vscale x 16 x double> @vselect_combine_regression(<vscale x 16 x i64> %v
   ret <vscale x 16 x double> %sel
 }
 
-define void @vselect_legalize_regression(<vscale x 16 x double> %a, <vscale x 16 x i1> %ma, <vscale x 16 x i1> %mb, <vscale x 16 x double>* %out) {
+define void @vselect_legalize_regression(<vscale x 16 x double> %a, <vscale x 16 x i1> %ma, <vscale x 16 x i1> %mb, ptr %out) {
 ; CHECK-LABEL: vselect_legalize_regression:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a2, zero, e8, m2, ta, ma
@@ -517,6 +517,6 @@ define void @vselect_legalize_regression(<vscale x 16 x double> %a, <vscale x 16
 ; CHECK-NEXT:    ret
   %cond = and <vscale x 16 x i1> %ma, %mb
   %sel = select <vscale x 16 x i1> %cond, <vscale x 16 x double> %a, <vscale x 16 x double> zeroinitializer
-  store <vscale x 16 x double> %sel, <vscale x 16 x double>* %out
+  store <vscale x 16 x double> %sel, ptr %out
   ret void
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
index ff39bd2580b7f..621445fb2dc5e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
@@ -17,8 +17,8 @@ declare <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64(<vscale x 1 x do
 declare <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.f64(<vscale x 1 x double>, double, i64)
 declare <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32( <vscale x 2 x float>, float, i64)
 
-declare void @llvm.riscv.vse.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>* nocapture, i64)
-declare void @llvm.riscv.vse.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>* nocapture, i64)
+declare void @llvm.riscv.vse.nxv1f64(<vscale x 1 x double>, ptr nocapture, i64)
+declare void @llvm.riscv.vse.nxv2f32(<vscale x 2 x float>, ptr nocapture, i64)
 
 define <vscale x 1 x double> @test1(i64 %avl, i8 zeroext %cond, <vscale x 1 x double> %a, <vscale x 1 x double> %b) nounwind {
 ; CHECK-LABEL: test1:
@@ -159,16 +159,16 @@ if.then:                                          ; preds = %entry
   %0 = tail call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.f64(<vscale x 1 x double> undef, double 1.000000e+00, i64 %avl)
   %1 = tail call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.f64(<vscale x 1 x double> undef, double 2.000000e+00, i64 %avl)
   %2 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 7, i64 %avl)
-  %3 = bitcast i8* @scratch to <vscale x 1 x double>*
-  tail call void @llvm.riscv.vse.nxv1f64(<vscale x 1 x double> %2, <vscale x 1 x double>* %3, i64 %avl)
+  %3 = bitcast ptr @scratch to ptr
+  tail call void @llvm.riscv.vse.nxv1f64(<vscale x 1 x double> %2, ptr %3, i64 %avl)
   br label %if.end
 
 if.else:                                          ; preds = %entry
   %4 = tail call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32(<vscale x 2 x float> undef, float 1.000000e+00, i64 %avl)
   %5 = tail call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32(<vscale x 2 x float> undef, float 2.000000e+00, i64 %avl)
   %6 = tail call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> %4, <vscale x 2 x float> %5, i64 7, i64 %avl)
-  %7 = bitcast i8* @scratch to <vscale x 2 x float>*
-  tail call void @llvm.riscv.vse.nxv2f32(<vscale x 2 x float> %6, <vscale x 2 x float>* %7, i64 %avl)
+  %7 = bitcast ptr @scratch to ptr
+  tail call void @llvm.riscv.vse.nxv2f32(<vscale x 2 x float> %6, ptr %7, i64 %avl)
   br label %if.end
 
 if.end:                                           ; preds = %if.else, %if.then
@@ -300,8 +300,8 @@ if.then4:                                         ; preds = %if.end
   %4 = tail call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.f64(<vscale x 1 x double> undef, double 1.000000e+00, i64 %3)
   %5 = tail call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.f64(<vscale x 1 x double> undef, double 2.000000e+00, i64 %3)
   %6 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %4, <vscale x 1 x double> %5, i64 7, i64 %3)
-  %7 = bitcast i8* @scratch to <vscale x 1 x double>*
-  tail call void @llvm.riscv.vse.nxv1f64(<vscale x 1 x double> %6, <vscale x 1 x double>* %7, i64 %3)
+  %7 = bitcast ptr @scratch to ptr
+  tail call void @llvm.riscv.vse.nxv1f64(<vscale x 1 x double> %6, ptr %7, i64 %3)
   br label %if.end10
 
 if.else5:                                         ; preds = %if.end
@@ -309,8 +309,8 @@ if.else5:                                         ; preds = %if.end
   %9 = tail call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32(<vscale x 2 x float> undef, float 1.000000e+00, i64 %8)
   %10 = tail call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32( <vscale x 2 x float> undef, float 2.000000e+00, i64 %8)
   %11 = tail call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> %9, <vscale x 2 x float> %10, i64 7, i64 %8)
-  %12 = bitcast i8* @scratch to <vscale x 2 x float>*
-  tail call void @llvm.riscv.vse.nxv2f32(<vscale x 2 x float> %11, <vscale x 2 x float>* %12, i64 %8)
+  %12 = bitcast ptr @scratch to ptr
+  tail call void @llvm.riscv.vse.nxv2f32(<vscale x 2 x float> %11, ptr %12, i64 %8)
   br label %if.end10
 
 if.end10:                                         ; preds = %if.else5, %if.then4
@@ -440,7 +440,7 @@ if.end:                                           ; preds = %if.else, %if.then
   ret <vscale x 1 x double> %3
 }
 
-define void @saxpy_vec(i64 %n, float %a, float* nocapture readonly %x, float* nocapture %y) {
+define void @saxpy_vec(i64 %n, float %a, ptr nocapture readonly %x, ptr nocapture %y) {
 ; CHECK-LABEL: saxpy_vec:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, a0, e32, m8, ta, ma
@@ -468,16 +468,16 @@ entry:
 for.body:                                         ; preds = %for.body, %entry
   %1 = phi i64 [ %7, %for.body ], [ %0, %entry ]
   %n.addr.016 = phi i64 [ %sub, %for.body ], [ %n, %entry ]
-  %x.addr.015 = phi float* [ %add.ptr, %for.body ], [ %x, %entry ]
-  %y.addr.014 = phi float* [ %add.ptr1, %for.body ], [ %y, %entry ]
-  %2 = bitcast float* %x.addr.015 to <vscale x 16 x float>*
-  %3 = tail call <vscale x 16 x float> @llvm.riscv.vle.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float>* %2, i64 %1)
-  %add.ptr = getelementptr inbounds float, float* %x.addr.015, i64 %1
-  %4 = bitcast float* %y.addr.014 to <vscale x 16 x float>*
-  %5 = tail call <vscale x 16 x float> @llvm.riscv.vle.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float>* %4, i64 %1)
+  %x.addr.015 = phi ptr [ %add.ptr, %for.body ], [ %x, %entry ]
+  %y.addr.014 = phi ptr [ %add.ptr1, %for.body ], [ %y, %entry ]
+  %2 = bitcast ptr %x.addr.015 to ptr
+  %3 = tail call <vscale x 16 x float> @llvm.riscv.vle.nxv16f32.i64(<vscale x 16 x float> undef, ptr %2, i64 %1)
+  %add.ptr = getelementptr inbounds float, ptr %x.addr.015, i64 %1
+  %4 = bitcast ptr %y.addr.014 to ptr
+  %5 = tail call <vscale x 16 x float> @llvm.riscv.vle.nxv16f32.i64(<vscale x 16 x float> undef, ptr %4, i64 %1)
   %6 = tail call <vscale x 16 x float> @llvm.riscv.vfmacc.nxv16f32.f32.i64(<vscale x 16 x float> %5, float %a, <vscale x 16 x float> %3, i64 7, i64 %1, i64 0)
-  tail call void @llvm.riscv.vse.nxv16f32.i64(<vscale x 16 x float> %6, <vscale x 16 x float>* %4, i64 %1)
-  %add.ptr1 = getelementptr inbounds float, float* %y.addr.014, i64 %1
+  tail call void @llvm.riscv.vse.nxv16f32.i64(<vscale x 16 x float> %6, ptr %4, i64 %1)
+  %add.ptr1 = getelementptr inbounds float, ptr %y.addr.014, i64 %1
   %sub = sub i64 %n.addr.016, %1
   %7 = tail call i64 @llvm.riscv.vsetvli.i64(i64 %sub, i64 2, i64 3)
   %cmp.not = icmp eq i64 %7, 0
@@ -488,14 +488,14 @@ for.end:                                          ; preds = %for.body, %entry
 }
 
 declare i64 @llvm.riscv.vsetvli.i64(i64, i64 immarg, i64 immarg)
-declare <vscale x 16 x float> @llvm.riscv.vle.nxv16f32.i64(<vscale x 16 x float>, <vscale x 16 x float>* nocapture, i64)
+declare <vscale x 16 x float> @llvm.riscv.vle.nxv16f32.i64(<vscale x 16 x float>, ptr nocapture, i64)
 declare <vscale x 16 x float> @llvm.riscv.vfmacc.nxv16f32.f32.i64(<vscale x 16 x float>, float, <vscale x 16 x float>, i64, i64, i64)
-declare void @llvm.riscv.vse.nxv16f32.i64(<vscale x 16 x float>, <vscale x 16 x float>* nocapture, i64)
+declare void @llvm.riscv.vse.nxv16f32.i64(<vscale x 16 x float>, ptr nocapture, i64)
 
 ; We need a vsetvli in the last block because the predecessors have different
 ; VTYPEs. The AVL is the same and the SEW/LMUL ratio implies the same VLMAX so
 ; we don't need to read AVL and can keep VL unchanged.
-define <vscale x 2 x i32> @test_vsetvli_x0_x0(<vscale x 2 x i32>* %x, <vscale x 2 x i16>* %y, <vscale x 2 x i32> %z, i64 %vl, i1 %cond) nounwind {
+define <vscale x 2 x i32> @test_vsetvli_x0_x0(ptr %x, ptr %y, <vscale x 2 x i32> %z, i64 %vl, i1 %cond) nounwind {
 ; CHECK-LABEL: test_vsetvli_x0_x0:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
@@ -511,11 +511,11 @@ define <vscale x 2 x i32> @test_vsetvli_x0_x0(<vscale x 2 x i32>* %x, <vscale x
 ; CHECK-NEXT:    vadd.vv v8, v9, v8
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32>* %x, i64 %vl)
+  %a = call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32(<vscale x 2 x i32> undef, ptr %x, i64 %vl)
   br i1 %cond, label %if, label %if.end
 
 if:
-  %b = call <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16>* %y, i64 %vl)
+  %b = call <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16(<vscale x 2 x i16> undef, ptr %y, i64 %vl)
   %c = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i16> %b, i16 0, i64 %vl)
   br label %if.end
 
@@ -524,8 +524,8 @@ if.end:
   %e = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> %a, <vscale x 2 x i32> %d, i64 %vl)
   ret <vscale x 2 x i32> %e
 }
-declare <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>*, i64)
-declare <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>*, i64)
+declare <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32(<vscale x 2 x i32>, ptr, i64)
+declare <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16(<vscale x 2 x i16>, ptr, i64)
 declare <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i16>, i16, i64)
 declare <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64)
 
@@ -534,7 +534,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32(<vscale x 2 x i32>, <vscale
 ; know the SEW/LMUL ratio for the if.end predecessor and the full vtype for
 ; the if2 predecessor. This makes sure we can merge a SEW/LMUL predecessor with
 ; a predecessor we know the vtype for.
-define <vscale x 2 x i32> @test_vsetvli_x0_x0_2(<vscale x 2 x i32>* %x, <vscale x 2 x i16>* %y, <vscale x 2 x i16>* %z, i64 %vl, i1 %cond, i1 %cond2, <vscale x 2 x i32> %w) nounwind {
+define <vscale x 2 x i32> @test_vsetvli_x0_x0_2(ptr %x, ptr %y, ptr %z, i64 %vl, i1 %cond, i1 %cond2, <vscale x 2 x i32> %w) nounwind {
 ; CHECK-LABEL: test_vsetvli_x0_x0_2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a3, e32, m1, ta, ma
@@ -557,11 +557,11 @@ define <vscale x 2 x i32> @test_vsetvli_x0_x0_2(<vscale x 2 x i32>* %x, <vscale
 ; CHECK-NEXT:    vadd.vv v8, v9, v8
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32>* %x, i64 %vl)
+  %a = call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32(<vscale x 2 x i32> undef, ptr %x, i64 %vl)
   br i1 %cond, label %if, label %if.end
 
 if:
-  %b = call <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16>* %y, i64 %vl)
+  %b = call <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16(<vscale x 2 x i16> undef, ptr %y, i64 %vl)
   %c = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16(<vscale x 2 x i32> undef, <vscale x 2 x i32> %a, <vscale x 2 x i16> %b, i64 %vl)
   br label %if.end
 
@@ -570,7 +570,7 @@ if.end:
   br i1 %cond2, label %if2, label %if2.end
 
 if2:
-  %e = call <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16>* %z, i64 %vl)
+  %e = call <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16(<vscale x 2 x i16> undef, ptr %z, i64 %vl)
   %f = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16(<vscale x 2 x i32> undef, <vscale x 2 x i32> %d, <vscale x 2 x i16> %e, i64 %vl)
   br label %if2.end
 
@@ -582,7 +582,7 @@ if2.end:
 declare <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i16>, i64)
 
 ; We should only need 1 vsetvli for this code.
-define void @vlmax(i64 %N, double* %c, double* %a, double* %b) {
+define void @vlmax(i64 %N, ptr %c, ptr %a, ptr %b) {
 ; CHECK-LABEL: vlmax:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a6, zero, e64, m1, ta, ma
@@ -610,16 +610,16 @@ entry:
 
 for.body:                                         ; preds = %entry, %for.body
   %i.014 = phi i64 [ %add, %for.body ], [ 0, %entry ]
-  %arrayidx = getelementptr inbounds double, double* %a, i64 %i.014
-  %1 = bitcast double* %arrayidx to <vscale x 1 x double>*
-  %2 = tail call <vscale x 1 x double> @llvm.riscv.vle.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double>* %1, i64 %0)
-  %arrayidx1 = getelementptr inbounds double, double* %b, i64 %i.014
-  %3 = bitcast double* %arrayidx1 to <vscale x 1 x double>*
-  %4 = tail call <vscale x 1 x double> @llvm.riscv.vle.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double>* %3, i64 %0)
+  %arrayidx = getelementptr inbounds double, ptr %a, i64 %i.014
+  %1 = bitcast ptr %arrayidx to ptr
+  %2 = tail call <vscale x 1 x double> @llvm.riscv.vle.nxv1f64.i64(<vscale x 1 x double> undef, ptr %1, i64 %0)
+  %arrayidx1 = getelementptr inbounds double, ptr %b, i64 %i.014
+  %3 = bitcast ptr %arrayidx1 to ptr
+  %4 = tail call <vscale x 1 x double> @llvm.riscv.vle.nxv1f64.i64(<vscale x 1 x double> undef, ptr %3, i64 %0)
   %5 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> %2, <vscale x 1 x double> %4, i64 7, i64 %0)
-  %arrayidx2 = getelementptr inbounds double, double* %c, i64 %i.014
-  %6 = bitcast double* %arrayidx2 to <vscale x 1 x double>*
-  tail call void @llvm.riscv.vse.nxv1f64.i64(<vscale x 1 x double> %5, <vscale x 1 x double>* %6, i64 %0)
+  %arrayidx2 = getelementptr inbounds double, ptr %c, i64 %i.014
+  %6 = bitcast ptr %arrayidx2 to ptr
+  tail call void @llvm.riscv.vse.nxv1f64.i64(<vscale x 1 x double> %5, ptr %6, i64 %0)
   %add = add nuw nsw i64 %i.014, %0
   %cmp = icmp slt i64 %add, %N
   br i1 %cmp, label %for.body, label %for.end
@@ -629,7 +629,7 @@ for.end:                                          ; preds = %for.body, %entry
 }
 
 ; A single vector store in the loop with VL controlled by VLMAX
-define void @vector_init_vlmax(i64 %N, double* %c) {
+define void @vector_init_vlmax(i64 %N, ptr %c) {
 ; CHECK-LABEL: vector_init_vlmax:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e64, m1, ta, ma
@@ -653,9 +653,9 @@ entry:
 
 for.body:                                         ; preds = %entry, %for.body
   %i.014 = phi i64 [ %add, %for.body ], [ 0, %entry ]
-  %arrayidx2 = getelementptr inbounds double, double* %c, i64 %i.014
-  %addr = bitcast double* %arrayidx2 to <vscale x 1 x double>*
-  tail call void @llvm.riscv.vse.nxv1f64.i64(<vscale x 1 x double> zeroinitializer, <vscale x 1 x double>* %addr, i64 %0)
+  %arrayidx2 = getelementptr inbounds double, ptr %c, i64 %i.014
+  %addr = bitcast ptr %arrayidx2 to ptr
+  tail call void @llvm.riscv.vse.nxv1f64.i64(<vscale x 1 x double> zeroinitializer, ptr %addr, i64 %0)
   %add = add nuw nsw i64 %i.014, %0
   %cmp = icmp slt i64 %add, %N
   br i1 %cmp, label %for.body, label %for.end
@@ -665,7 +665,7 @@ for.end:                                          ; preds = %for.body, %entry
 }
 
 ; Same as above, but VL comes from user provided AVL value
-define void @vector_init_vsetvli_N(i64 %N, double* %c) {
+define void @vector_init_vsetvli_N(i64 %N, ptr %c) {
 ; CHECK-LABEL: vector_init_vsetvli_N:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a0, e64, m1, ta, ma
@@ -691,9 +691,9 @@ entry:
 
 for.body:                                         ; preds = %entry, %for.body
   %i.014 = phi i64 [ %add, %for.body ], [ 0, %entry ]
-  %arrayidx2 = getelementptr inbounds double, double* %c, i64 %i.014
-  %addr = bitcast double* %arrayidx2 to <vscale x 1 x double>*
-  tail call void @llvm.riscv.vse.nxv1f64.i64(<vscale x 1 x double> zeroinitializer, <vscale x 1 x double>* %addr, i64 %0)
+  %arrayidx2 = getelementptr inbounds double, ptr %c, i64 %i.014
+  %addr = bitcast ptr %arrayidx2 to ptr
+  tail call void @llvm.riscv.vse.nxv1f64.i64(<vscale x 1 x double> zeroinitializer, ptr %addr, i64 %0)
   %add = add nuw nsw i64 %i.014, %0
   %cmp = icmp slt i64 %add, %N
   br i1 %cmp, label %for.body, label %for.end
@@ -703,7 +703,7 @@ for.end:                                          ; preds = %for.body, %entry
 }
 
 ; Same as above, but VL is a hard coded constant (in the preheader)
-define void @vector_init_vsetvli_fv(i64 %N, double* %c) {
+define void @vector_init_vsetvli_fv(i64 %N, ptr %c) {
 ; CHECK-LABEL: vector_init_vsetvli_fv:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li a2, 0
@@ -726,9 +726,9 @@ entry:
 
 for.body:                                         ; preds = %entry, %for.body
   %i.014 = phi i64 [ %add, %for.body ], [ 0, %entry ]
-  %arrayidx2 = getelementptr inbounds double, double* %c, i64 %i.014
-  %addr = bitcast double* %arrayidx2 to <vscale x 1 x double>*
-  tail call void @llvm.riscv.vse.nxv1f64.i64(<vscale x 1 x double> zeroinitializer, <vscale x 1 x double>* %addr, i64 %0)
+  %arrayidx2 = getelementptr inbounds double, ptr %c, i64 %i.014
+  %addr = bitcast ptr %arrayidx2 to ptr
+  tail call void @llvm.riscv.vse.nxv1f64.i64(<vscale x 1 x double> zeroinitializer, ptr %addr, i64 %0)
   %add = add nuw nsw i64 %i.014, %0
   %cmp = icmp slt i64 %add, %N
   br i1 %cmp, label %for.body, label %for.end
@@ -739,7 +739,7 @@ for.end:                                          ; preds = %for.body
 
 ; Same as above, but result of vsetvli in preheader isn't used, and
 ; constant is repeated in loop
-define void @vector_init_vsetvli_fv2(i64 %N, double* %c) {
+define void @vector_init_vsetvli_fv2(i64 %N, ptr %c) {
 ; CHECK-LABEL: vector_init_vsetvli_fv2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li a2, 0
@@ -760,9 +760,9 @@ entry:
 
 for.body:                                         ; preds = %entry, %for.body
   %i.014 = phi i64 [ %add, %for.body ], [ 0, %entry ]
-  %arrayidx2 = getelementptr inbounds double, double* %c, i64 %i.014
-  %addr = bitcast double* %arrayidx2 to <vscale x 1 x double>*
-  tail call void @llvm.riscv.vse.nxv1f64.i64(<vscale x 1 x double> zeroinitializer, <vscale x 1 x double>* %addr, i64 4)
+  %arrayidx2 = getelementptr inbounds double, ptr %c, i64 %i.014
+  %addr = bitcast ptr %arrayidx2 to ptr
+  tail call void @llvm.riscv.vse.nxv1f64.i64(<vscale x 1 x double> zeroinitializer, ptr %addr, i64 4)
   %add = add nuw nsw i64 %i.014, 4
   %cmp = icmp slt i64 %add, %N
   br i1 %cmp, label %for.body, label %for.end
@@ -773,7 +773,7 @@ for.end:                                          ; preds = %for.body
 
 ; Same as above, but AVL is only specified on the store intrinsic
 ; This case will require some form of hoisting or PRE
-define void @vector_init_vsetvli_fv3(i64 %N, double* %c) {
+define void @vector_init_vsetvli_fv3(i64 %N, ptr %c) {
 ; CHECK-LABEL: vector_init_vsetvli_fv3:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li a2, 0
@@ -793,9 +793,9 @@ entry:
 
 for.body:                                         ; preds = %entry, %for.body
   %i.014 = phi i64 [ %add, %for.body ], [ 0, %entry ]
-  %arrayidx2 = getelementptr inbounds double, double* %c, i64 %i.014
-  %addr = bitcast double* %arrayidx2 to <vscale x 1 x double>*
-  tail call void @llvm.riscv.vse.nxv1f64.i64(<vscale x 1 x double> zeroinitializer, <vscale x 1 x double>* %addr, i64 4)
+  %arrayidx2 = getelementptr inbounds double, ptr %c, i64 %i.014
+  %addr = bitcast ptr %arrayidx2 to ptr
+  tail call void @llvm.riscv.vse.nxv1f64.i64(<vscale x 1 x double> zeroinitializer, ptr %addr, i64 4)
   %add = add nuw nsw i64 %i.014, 4
   %cmp = icmp slt i64 %add, %N
   br i1 %cmp, label %for.body, label %for.end
@@ -854,7 +854,7 @@ if.end:
   ret <vscale x 2 x i32> %b
 }
 
-define <vscale x 1 x double> @compat_store_consistency(i1 %cond, <vscale x 1 x double> %a, <vscale x 1 x double> %b, <vscale x 1 x double>* %p1, <vscale x 1 x float> %c, <vscale x 1 x float>* %p2) {
+define <vscale x 1 x double> @compat_store_consistency(i1 %cond, <vscale x 1 x double> %a, <vscale x 1 x double> %b, ptr %p1, <vscale x 1 x float> %c, ptr %p2) {
 ; CHECK-LABEL: compat_store_consistency:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    andi a0, a0, 1
@@ -868,11 +868,11 @@ define <vscale x 1 x double> @compat_store_consistency(i1 %cond, <vscale x 1 x d
 ; CHECK-NEXT:    ret
 entry:
   %res = fadd <vscale x 1 x double> %a, %b
-  store <vscale x 1 x double> %res, <vscale x 1 x double>* %p1
+  store <vscale x 1 x double> %res, ptr %p1
   br i1 %cond, label %if.then, label %if.end
 
 if.then:                                          ; preds = %entry
-  store <vscale x 1 x float> %c, <vscale x 1 x float>* %p2
+  store <vscale x 1 x float> %c, ptr %p2
   br label %if.end
 
 if.end:                                           ; preds = %if.else, %if.then
@@ -882,7 +882,7 @@ if.end:                                           ; preds = %if.else, %if.then
 ; Next two tests (which are the same except for swapped block order), make sure that the
 ; demanded reasoning around vmv.s.x correctly handles a forward state with only a valid
 ; SEWLMULRatio.  We previously had a crash bug in this case.
-define <vscale x 2 x i32> @test_ratio_only_vmv_s_x(<vscale x 2 x i32>* %x, <vscale x 2 x i16>* %y, i1 %cond) nounwind {
+define <vscale x 2 x i32> @test_ratio_only_vmv_s_x(ptr %x, ptr %y, i1 %cond) nounwind {
 ; CHECK-LABEL: test_ratio_only_vmv_s_x:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    andi a2, a2, 1
@@ -900,11 +900,11 @@ define <vscale x 2 x i32> @test_ratio_only_vmv_s_x(<vscale x 2 x i32>* %x, <vsca
 ; CHECK-NEXT:    vmv.s.x v8, zero
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32>* %x, i64 2)
+  %a = call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32(<vscale x 2 x i32> undef, ptr %x, i64 2)
   br i1 %cond, label %if, label %if.end
 
 if:
-  %b = call <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16>* %y, i64 2)
+  %b = call <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16(<vscale x 2 x i16> undef, ptr %y, i64 2)
   %c = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i16> %b, i16 0, i64 2)
   br label %if.end
 
@@ -914,7 +914,7 @@ if.end:
   ret <vscale x 2 x i32> %e
 }
 
-define <vscale x 2 x i32> @test_ratio_only_vmv_s_x2(<vscale x 2 x i32>* %x, <vscale x 2 x i16>* %y, i1 %cond) nounwind {
+define <vscale x 2 x i32> @test_ratio_only_vmv_s_x2(ptr %x, ptr %y, i1 %cond) nounwind {
 ; CHECK-LABEL: test_ratio_only_vmv_s_x2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    andi a2, a2, 1
@@ -932,12 +932,12 @@ define <vscale x 2 x i32> @test_ratio_only_vmv_s_x2(<vscale x 2 x i32>* %x, <vsc
 ; CHECK-NEXT:    vmv.s.x v8, zero
 ; CHECK-NEXT:    ret
 entry:
-  %b = call <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16>* %y, i64 2)
+  %b = call <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16(<vscale x 2 x i16> undef, ptr %y, i64 2)
   %c = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i16> %b, i16 0, i64 2)
   br i1 %cond, label %if, label %if.end
 
 if:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32>* %x, i64 2)
+  %a = call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32(<vscale x 2 x i32> undef, ptr %x, i64 2)
   br label %if.end
 
 if.end:
@@ -979,9 +979,9 @@ exit:
 }
 
 declare i64 @llvm.riscv.vsetvlimax.i64(i64, i64)
-declare <vscale x 1 x double> @llvm.riscv.vle.nxv1f64.i64(<vscale x 1 x double>, <vscale x 1 x double>* nocapture, i64)
+declare <vscale x 1 x double> @llvm.riscv.vle.nxv1f64.i64(<vscale x 1 x double>, ptr nocapture, i64)
 declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64, i64)
-declare void @llvm.riscv.vse.nxv1f64.i64(<vscale x 1 x double>, <vscale x 1 x double>* nocapture, i64)
+declare void @llvm.riscv.vse.nxv1f64.i64(<vscale x 1 x double>, ptr nocapture, i64)
 declare <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
index e0d0f97a0f478..fdcce72a01eb3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
@@ -7,9 +7,9 @@
   target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n64-S128"
   target triple = "riscv64"
 
-  define <vscale x 1 x i64> @load_add_or_sub(i8 zeroext %cond, <vscale x 1 x i64>* %0, <vscale x 1 x i64> %1, i64 %2) #0 {
+  define <vscale x 1 x i64> @load_add_or_sub(i8 zeroext %cond, ptr %0, <vscale x 1 x i64> %1, i64 %2) #0 {
   entry:
-    %a = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64>* %0, i64 %2)
+    %a = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64> undef, ptr %0, i64 %2)
     %tobool = icmp eq i8 %cond, 0
     br i1 %tobool, label %if.else, label %if.then
 
@@ -26,9 +26,9 @@
     ret <vscale x 1 x i64> %d
   }
 
-  define void @load_zext_or_sext(i8 zeroext %cond, <vscale x 1 x i32>* %0, <vscale x 1 x i64>* %1, i64 %2) #0 {
+  define void @load_zext_or_sext(i8 zeroext %cond, ptr %0, ptr %1, i64 %2) #0 {
   entry:
-    %a = call <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32>* %0, i64 %2)
+    %a = call <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32> undef, ptr %0, i64 %2)
     %tobool = icmp eq i8 %cond, 0
     br i1 %tobool, label %if.else, label %if.then
 
@@ -42,7 +42,7 @@
 
   if.end:                                           ; preds = %if.else, %if.then
     %d = phi <vscale x 1 x i64> [ %b, %if.then ], [ %c, %if.else ]
-    call void @llvm.riscv.vse.nxv1i64.i64(<vscale x 1 x i64> %d, <vscale x 1 x i64>* %1, i64 %2)
+    call void @llvm.riscv.vse.nxv1i64.i64(<vscale x 1 x i64> %d, ptr %1, i64 %2)
     ret void
   }
 
@@ -100,25 +100,25 @@
     ret void
   }
 
-  define void @redusum_loop(i32* nocapture noundef readonly %a, i32 noundef signext %n, i32* nocapture noundef writeonly %res) #0 {
+  define void @redusum_loop(ptr nocapture noundef readonly %a, i32 noundef signext %n, ptr nocapture noundef writeonly %res) #0 {
   entry:
     br label %vector.body
 
   vector.body:                                      ; preds = %vector.body, %entry
-    %lsr.iv1 = phi i32* [ %scevgep, %vector.body ], [ %a, %entry ]
+    %lsr.iv1 = phi ptr [ %scevgep, %vector.body ], [ %a, %entry ]
     %lsr.iv = phi i64 [ %lsr.iv.next, %vector.body ], [ 2048, %entry ]
     %vec.phi = phi <4 x i32> [ zeroinitializer, %entry ], [ %0, %vector.body ]
-    %lsr.iv12 = bitcast i32* %lsr.iv1 to <4 x i32>*
-    %wide.load = load <4 x i32>, <4 x i32>* %lsr.iv12, align 4
+    %lsr.iv12 = bitcast ptr %lsr.iv1 to ptr
+    %wide.load = load <4 x i32>, ptr %lsr.iv12, align 4
     %0 = add <4 x i32> %wide.load, %vec.phi
     %lsr.iv.next = add nsw i64 %lsr.iv, -4
-    %scevgep = getelementptr i32, i32* %lsr.iv1, i64 4
+    %scevgep = getelementptr i32, ptr %lsr.iv1, i64 4
     %1 = icmp eq i64 %lsr.iv.next, 0
     br i1 %1, label %middle.block, label %vector.body
 
   middle.block:                                     ; preds = %vector.body
     %2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %0)
-    store i32 %2, i32* %res, align 4
+    store i32 %2, ptr %res, align 4
     ret void
   }
 
@@ -136,11 +136,11 @@
 
   declare <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64) #1
 
-  declare <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>* nocapture, i64) #3
+  declare <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64>, ptr nocapture, i64) #3
 
-  declare <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32>, <vscale x 1 x i32>* nocapture, i64) #3
+  declare <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32>, ptr nocapture, i64) #3
 
-  declare void @llvm.riscv.vse.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>* nocapture, i64) #4
+  declare void @llvm.riscv.vse.nxv1i64.i64(<vscale x 1 x i64>, ptr nocapture, i64) #4
 
   declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>, <vscale x 1 x i32>, i64) #1
 

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
index 7c95d81306655..29ce7c52e8fd5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
@@ -11,7 +11,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
   i64, i64)
 declare <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   <vscale x 1 x i1>,
   i64, i64)
 
@@ -47,7 +47,7 @@ entry:
   ret <vscale x 1 x double> %1
 }
 
-define <vscale x 1 x i64> @test3(i64 %avl, <vscale x 1 x i64> %a, <vscale x 1 x i64>* %b, <vscale x 1 x i1> %c) nounwind {
+define <vscale x 1 x i64> @test3(i64 %avl, <vscale x 1 x i64> %a, ptr %b, <vscale x 1 x i1> %c) nounwind {
 ; CHECK-LABEL: test3:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -57,14 +57,14 @@ entry:
   %0 = tail call i64 @llvm.riscv.vsetvli(i64 %avl, i64 3, i64 0)
   %1 = call <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
     <vscale x 1 x i64> %a,
-    <vscale x 1 x i64>* %b,
+    ptr %b,
     <vscale x 1 x i1> %c,
     i64 %0, i64 1)
 
   ret <vscale x 1 x i64> %1
 }
 
-define <vscale x 1 x i64> @test4(i64 %avl, <vscale x 1 x i64> %a, <vscale x 1 x i64>* %b, <vscale x 1 x i1> %c) nounwind {
+define <vscale x 1 x i64> @test4(i64 %avl, <vscale x 1 x i64> %a, ptr %b, <vscale x 1 x i1> %c) nounwind {
 ; CHECK-LABEL: test4:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -74,7 +74,7 @@ entry:
   %0 = tail call i64 @llvm.riscv.vsetvli(i64 %avl, i64 3, i64 0)
   %1 = call <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
     <vscale x 1 x i64> %a,
-    <vscale x 1 x i64>* %b,
+    ptr %b,
     <vscale x 1 x i1> %c,
     i64 %avl, i64 1)
 
@@ -99,7 +99,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.i64(<vscale x 1 x i64>, <vsc
 declare <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1.i64(<vscale x 1 x i1>, <vscale x 1 x i1>, i64)
 
 ; Make sure we don't insert a vsetvli for the vmor instruction.
-define void @test6(i32* nocapture readonly %A, i32* nocapture %B, i64 %n) {
+define void @test6(ptr nocapture readonly %A, ptr nocapture %B, i64 %n) {
 ; CHECK-LABEL: test6:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a6, a2, e32, m1, ta, ma
@@ -132,15 +132,15 @@ for.cond.cleanup:                                 ; preds = %for.body, %entry
 for.body:                                         ; preds = %entry, %for.body
   %1 = phi i64 [ %8, %for.body ], [ %0, %entry ]
   %i.012 = phi i64 [ %add, %for.body ], [ 0, %entry ]
-  %add.ptr = getelementptr inbounds i32, i32* %A, i64 %i.012
-  %2 = bitcast i32* %add.ptr to <vscale x 2 x i32>*
-  %3 = tail call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32>* %2, i64 %1)
+  %add.ptr = getelementptr inbounds i32, ptr %A, i64 %i.012
+  %2 = bitcast ptr %add.ptr to ptr
+  %3 = tail call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32.i64(<vscale x 2 x i32> undef, ptr %2, i64 %1)
   %4 = tail call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32.i64(<vscale x 2 x i32> %3, i32 -2, i64 %1)
   %5 = tail call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32.i32.i64(<vscale x 2 x i32> %3, i32 2, i64 %1)
   %6 = tail call <vscale x 2 x i1> @llvm.riscv.vmor.nxv2i1.i64(<vscale x 2 x i1> %4, <vscale x 2 x i1> %5, i64 %1)
-  %add.ptr1 = getelementptr inbounds i32, i32* %B, i64 %i.012
-  %7 = bitcast i32* %add.ptr1 to <vscale x 2 x i32>*
-  tail call void @llvm.riscv.vse.mask.nxv2i32.i64(<vscale x 2 x i32> %3, <vscale x 2 x i32>* %7, <vscale x 2 x i1> %6, i64 %1)
+  %add.ptr1 = getelementptr inbounds i32, ptr %B, i64 %i.012
+  %7 = bitcast ptr %add.ptr1 to ptr
+  tail call void @llvm.riscv.vse.mask.nxv2i32.i64(<vscale x 2 x i32> %3, ptr %7, <vscale x 2 x i1> %6, i64 %1)
   %add = add i64 %1, %i.012
   %8 = tail call i64 @llvm.riscv.vsetvli.i64(i64 %n, i64 2, i64 0)
   %cmp.not = icmp eq i64 %8, 0
@@ -394,7 +394,7 @@ entry:
   ret <vscale x 1 x double> %y2
 }
 
-define i64 @avl_forward1(<vscale x 2 x i32> %v, <vscale x 2 x i32>* %p) nounwind {
+define i64 @avl_forward1(<vscale x 2 x i32> %v, ptr %p) nounwind {
 ; CHECK-LABEL: avl_forward1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli a1, 6, e32, m1, ta, ma
@@ -403,12 +403,12 @@ define i64 @avl_forward1(<vscale x 2 x i32> %v, <vscale x 2 x i32>* %p) nounwind
 ; CHECK-NEXT:    ret
 entry:
   %vl = tail call i64 @llvm.riscv.vsetvli(i64 6, i64 2, i64 0)
-  call void @llvm.riscv.vse.nxv2i32.i64(<vscale x 2 x i32> %v, <vscale x 2 x i32>* %p, i64 %vl)
+  call void @llvm.riscv.vse.nxv2i32.i64(<vscale x 2 x i32> %v, ptr %p, i64 %vl)
   ret i64 %vl
 }
 
 ; Incompatible vtype
-define i64 @avl_forward1b_neg(<vscale x 2 x i32> %v, <vscale x 2 x i32>* %p) nounwind {
+define i64 @avl_forward1b_neg(<vscale x 2 x i32> %v, ptr %p) nounwind {
 ; CHECK-LABEL: avl_forward1b_neg:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli a1, 6, e16, m1, ta, ma
@@ -418,11 +418,11 @@ define i64 @avl_forward1b_neg(<vscale x 2 x i32> %v, <vscale x 2 x i32>* %p) nou
 ; CHECK-NEXT:    ret
 entry:
   %vl = tail call i64 @llvm.riscv.vsetvli(i64 6, i64 1, i64 0)
-  call void @llvm.riscv.vse.nxv2i32.i64(<vscale x 2 x i32> %v, <vscale x 2 x i32>* %p, i64 %vl)
+  call void @llvm.riscv.vse.nxv2i32.i64(<vscale x 2 x i32> %v, ptr %p, i64 %vl)
   ret i64 %vl
 }
 
-define i64 @avl_forward2(<vscale x 2 x i32> %v, <vscale x 2 x i32>* %p) nounwind {
+define i64 @avl_forward2(<vscale x 2 x i32> %v, ptr %p) nounwind {
 ; CHECK-LABEL: avl_forward2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
@@ -431,13 +431,13 @@ define i64 @avl_forward2(<vscale x 2 x i32> %v, <vscale x 2 x i32>* %p) nounwind
 ; CHECK-NEXT:    ret
 entry:
   %vl = tail call i64 @llvm.riscv.vsetvlimax(i64 2, i64 0)
-  call void @llvm.riscv.vse.nxv2i32.i64(<vscale x 2 x i32> %v, <vscale x 2 x i32>* %p, i64 %vl)
+  call void @llvm.riscv.vse.nxv2i32.i64(<vscale x 2 x i32> %v, ptr %p, i64 %vl)
   ret i64 %vl
 }
 
 
 ; %vl is intentionally used only once
-define void @avl_forward3(<vscale x 2 x i32> %v, <vscale x 2 x i32>* %p, i64 %reg) nounwind {
+define void @avl_forward3(<vscale x 2 x i32> %v, ptr %p, i64 %reg) nounwind {
 ; CHECK-LABEL: avl_forward3:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -445,12 +445,12 @@ define void @avl_forward3(<vscale x 2 x i32> %v, <vscale x 2 x i32>* %p, i64 %re
 ; CHECK-NEXT:    ret
 entry:
   %vl = tail call i64 @llvm.riscv.vsetvli(i64 %reg, i64 2, i64 0)
-  call void @llvm.riscv.vse.nxv2i32.i64(<vscale x 2 x i32> %v, <vscale x 2 x i32>* %p, i64 %vl)
+  call void @llvm.riscv.vse.nxv2i32.i64(<vscale x 2 x i32> %v, ptr %p, i64 %vl)
   ret void
 }
 
 ; %vl has multiple uses
-define i64 @avl_forward3b(<vscale x 2 x i32> %v, <vscale x 2 x i32>* %p, i64 %reg) nounwind {
+define i64 @avl_forward3b(<vscale x 2 x i32> %v, ptr %p, i64 %reg) nounwind {
 ; CHECK-LABEL: avl_forward3b:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32, m1, ta, ma
@@ -459,12 +459,12 @@ define i64 @avl_forward3b(<vscale x 2 x i32> %v, <vscale x 2 x i32>* %p, i64 %re
 ; CHECK-NEXT:    ret
 entry:
   %vl = tail call i64 @llvm.riscv.vsetvli(i64 %reg, i64 2, i64 0)
-  call void @llvm.riscv.vse.nxv2i32.i64(<vscale x 2 x i32> %v, <vscale x 2 x i32>* %p, i64 %vl)
+  call void @llvm.riscv.vse.nxv2i32.i64(<vscale x 2 x i32> %v, ptr %p, i64 %vl)
   ret i64 %vl
 }
 
 ; Like4, but with incompatible VTYPE
-define void @avl_forward4(<vscale x 2 x i32> %v, <vscale x 2 x i32>* %p, i64 %reg) nounwind {
+define void @avl_forward4(<vscale x 2 x i32> %v, ptr %p, i64 %reg) nounwind {
 ; CHECK-LABEL: avl_forward4:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16, m1, ta, ma
@@ -473,12 +473,12 @@ define void @avl_forward4(<vscale x 2 x i32> %v, <vscale x 2 x i32>* %p, i64 %re
 ; CHECK-NEXT:    ret
 entry:
   %vl = tail call i64 @llvm.riscv.vsetvli(i64 %reg, i64 1, i64 0)
-  call void @llvm.riscv.vse.nxv2i32.i64(<vscale x 2 x i32> %v, <vscale x 2 x i32>* %p, i64 %vl)
+  call void @llvm.riscv.vse.nxv2i32.i64(<vscale x 2 x i32> %v, ptr %p, i64 %vl)
   ret void
 }
 
 ; Like4b, but with incompatible VTYPE
-define i64 @avl_forward4b(<vscale x 2 x i32> %v, <vscale x 2 x i32>* %p, i64 %reg) nounwind {
+define i64 @avl_forward4b(<vscale x 2 x i32> %v, ptr %p, i64 %reg) nounwind {
 ; CHECK-LABEL: avl_forward4b:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16, m1, ta, ma
@@ -488,13 +488,13 @@ define i64 @avl_forward4b(<vscale x 2 x i32> %v, <vscale x 2 x i32>* %p, i64 %re
 ; CHECK-NEXT:    ret
 entry:
   %vl = tail call i64 @llvm.riscv.vsetvli(i64 %reg, i64 1, i64 0)
-  call void @llvm.riscv.vse.nxv2i32.i64(<vscale x 2 x i32> %v, <vscale x 2 x i32>* %p, i64 %vl)
+  call void @llvm.riscv.vse.nxv2i32.i64(<vscale x 2 x i32> %v, ptr %p, i64 %vl)
   ret i64 %vl
 }
 
 ; Fault first loads can modify VL.
 ; TODO: The VSETVLI of vadd could be removed here.
-define <vscale x 1 x i64> @vleNff(i64* %str, i64 %n, i64 %x) {
+define <vscale x 1 x i64> @vleNff(ptr %str, i64 %n, i64 %x) {
 ; CHECK-LABEL: vleNff:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8, m4, ta, ma
@@ -505,8 +505,8 @@ define <vscale x 1 x i64> @vleNff(i64* %str, i64 %n, i64 %x) {
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call i64 @llvm.riscv.vsetvli.i64(i64 %n, i64 0, i64 2)
-  %1 = bitcast i64* %str to <vscale x 1 x i64>*
-  %2 = tail call { <vscale x 1 x i64>, i64 } @llvm.riscv.vleff.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64>* %1, i64 %0)
+  %1 = bitcast ptr %str to ptr
+  %2 = tail call { <vscale x 1 x i64>, i64 } @llvm.riscv.vleff.nxv1i64.i64(<vscale x 1 x i64> undef, ptr %1, i64 %0)
   %3 = extractvalue { <vscale x 1 x i64>, i64 } %2, 0
   %4 = extractvalue { <vscale x 1 x i64>, i64 } %2, 1
   %5 = tail call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i64(<vscale x 1 x i64> %3, <vscale x 1 x i64> %3, i64 %x, i64 %4)
@@ -515,7 +515,7 @@ entry:
 
 ; Similar test case, but use the same policy for vleff and vadd.
 ; Note: The test may be redundant if we could fix the TODO of @vleNff.
-define <vscale x 1 x i64> @vleNff2(i64* %str, i64 %n, i64 %x) {
+define <vscale x 1 x i64> @vleNff2(ptr %str, i64 %n, i64 %x) {
 ; CHECK-LABEL: vleNff2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8, m4, ta, ma
@@ -525,8 +525,8 @@ define <vscale x 1 x i64> @vleNff2(i64* %str, i64 %n, i64 %x) {
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call i64 @llvm.riscv.vsetvli.i64(i64 %n, i64 0, i64 2)
-  %1 = bitcast i64* %str to <vscale x 1 x i64>*
-  %2 = tail call { <vscale x 1 x i64>, i64 } @llvm.riscv.vleff.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64>* %1, i64 %0)
+  %1 = bitcast ptr %str to ptr
+  %2 = tail call { <vscale x 1 x i64>, i64 } @llvm.riscv.vleff.nxv1i64.i64(<vscale x 1 x i64> undef, ptr %1, i64 %0)
   %3 = extractvalue { <vscale x 1 x i64>, i64 } %2, 0
   %4 = extractvalue { <vscale x 1 x i64>, i64 } %2, 1
   %5 = tail call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %3, i64 %x, i64 %4)
@@ -534,13 +534,13 @@ entry:
 }
 
 declare { <vscale x 1 x i64>, i64 } @llvm.riscv.vleff.nxv1i64.i64(
-  <vscale x 1 x i64>, <vscale x 1 x i64>* nocapture, i64)
+  <vscale x 1 x i64>, ptr nocapture, i64)
 
 declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.i64.i64(
   <vscale x 1 x i64>, i64, i64)
 
 ; Ensure AVL register is alive when forwarding an AVL immediate that does not fit in 5 bits
-define <vscale x 2 x i32> @avl_forward5(<vscale x 2 x i32>* %addr) {
+define <vscale x 2 x i32> @avl_forward5(ptr %addr) {
 ; CHECK-LABEL: avl_forward5:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a1, 32
@@ -549,7 +549,7 @@ define <vscale x 2 x i32> @avl_forward5(<vscale x 2 x i32>* %addr) {
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %gvl = tail call i64 @llvm.riscv.vsetvli.i64(i64 32, i64 0, i64 2)
-  %ret = tail call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32>* %addr, i64 %gvl)
+  %ret = tail call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32.i64(<vscale x 2 x i32> undef, ptr %addr, i64 %gvl)
   ret <vscale x 2 x i32> %ret
 }
 
@@ -695,9 +695,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64
   i64)
 
 declare i64 @llvm.riscv.vsetvli.i64(i64, i64 immarg, i64 immarg)
-declare <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32.i64(<vscale x 2 x i32>, <vscale x 2 x i32>* nocapture, i64)
+declare <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32.i64(<vscale x 2 x i32>, ptr nocapture, i64)
 declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32.i64(<vscale x 2 x i32>, i32, i64)
 declare <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32.i32.i64(<vscale x 2 x i32>, i32, i64)
 declare <vscale x 2 x i1> @llvm.riscv.vmor.nxv2i1.i64(<vscale x 2 x i1>, <vscale x 2 x i1>, i64)
-declare void @llvm.riscv.vse.mask.nxv2i32.i64(<vscale x 2 x i32>, <vscale x 2 x i32>* nocapture, <vscale x 2 x i1>, i64)
-declare void @llvm.riscv.vse.nxv2i32.i64(<vscale x 2 x i32>, <vscale x 2 x i32>* nocapture, i64)
+declare void @llvm.riscv.vse.mask.nxv2i32.i64(<vscale x 2 x i32>, ptr nocapture, <vscale x 2 x i1>, i64)
+declare void @llvm.riscv.vse.nxv2i32.i64(<vscale x 2 x i32>, ptr nocapture, i64)

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
index 29a7a2f5a6317..a37a672e30a9e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
@@ -13,16 +13,16 @@
     ret <vscale x 1 x i64> %a
   }
 
-  define <vscale x 1 x i64> @load_add(<vscale x 1 x i64>* %0, <vscale x 1 x i64> %1, i64 %2) #0 {
+  define <vscale x 1 x i64> @load_add(ptr %0, <vscale x 1 x i64> %1, i64 %2) #0 {
   entry:
-    %a = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64>* %0, i64 %2)
+    %a = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64> undef, ptr %0, i64 %2)
     %b = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %a, <vscale x 1 x i64> %1, i64 %2)
     ret <vscale x 1 x i64> %b
   }
 
-  define <vscale x 1 x i64> @load_zext(<vscale x 1 x i32>* %0, i64 %1) #0 {
+  define <vscale x 1 x i64> @load_zext(ptr %0, i64 %1) #0 {
   entry:
-    %a = call <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32>* %0, i64 %1)
+    %a = call <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32> undef, ptr %0, i64 %1)
     %b = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> %a, i64 %1)
     ret <vscale x 1 x i64> %b
   }
@@ -35,18 +35,18 @@
     ret i64 %a
   }
 
-  define void @add_v2i64(<2 x i64>* %x, <2 x i64>* %y) #0 {
-    %a = load <2 x i64>, <2 x i64>* %x, align 16
-    %b = load <2 x i64>, <2 x i64>* %y, align 16
+  define void @add_v2i64(ptr %x, ptr %y) #0 {
+    %a = load <2 x i64>, ptr %x, align 16
+    %b = load <2 x i64>, ptr %y, align 16
     %c = add <2 x i64> %a, %b
-    store <2 x i64> %c, <2 x i64>* %x, align 16
+    store <2 x i64> %c, ptr %x, align 16
     ret void
   }
 
   declare i64 @llvm.vector.reduce.add.v2i64(<2 x i64>) #2
 
-  define i64 @vreduce_add_v2i64(<2 x i64>* %x) #0 {
-    %v = load <2 x i64>, <2 x i64>* %x, align 16
+  define i64 @vreduce_add_v2i64(ptr %x) #0 {
+    %v = load <2 x i64>, ptr %x, align 16
     %red = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %v)
     ret i64 %red
   }
@@ -60,9 +60,9 @@
     ret <vscale x 1 x i64> %b
   }
 
-  define <vscale x 1 x i64> @load_add_inlineasm(<vscale x 1 x i64>* %0, <vscale x 1 x i64> %1, i64 %2) #0 {
+  define <vscale x 1 x i64> @load_add_inlineasm(ptr %0, <vscale x 1 x i64> %1, i64 %2) #0 {
   entry:
-    %a = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64>* %0, i64 %2)
+    %a = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64> undef, ptr %0, i64 %2)
     call void asm sideeffect "", ""()
     %b = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %a, <vscale x 1 x i64> %1, i64 %2)
     ret <vscale x 1 x i64> %b
@@ -78,9 +78,9 @@
 
   declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64) #1
 
-  declare <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>* nocapture, i64) #4
+  declare <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64>, ptr nocapture, i64) #4
 
-  declare <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32>, <vscale x 1 x i32>* nocapture, i64) #4
+  declare <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32>, ptr nocapture, i64) #4
 
   declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>, <vscale x 1 x i32>, i64) #1
 

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-intrinsics.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-intrinsics.ll
index 11b164fbf51e8..20dfc7755d4c4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-intrinsics.ll
@@ -102,17 +102,17 @@ define void @test_vsetvlimax_e32m2_nouse() nounwind {
   ret void
 }
 
-declare <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32.iXLen(<vscale x 4 x i32>, <vscale x 4 x i32>*, iXLen)
+declare <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32.iXLen(<vscale x 4 x i32>, ptr, iXLen)
 
 ; Check that we remove the redundant vsetvli when followed by another operation
-define <vscale x 4 x i32> @redundant_vsetvli(iXLen %avl, <vscale x 4 x i32>* %ptr) nounwind {
+define <vscale x 4 x i32> @redundant_vsetvli(iXLen %avl, ptr %ptr) nounwind {
 ; CHECK-LABEL: redundant_vsetvli:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a1)
 ; CHECK-NEXT:    ret
   %vl = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %avl, iXLen 2, iXLen 1)
-  %x = call <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32.iXLen(<vscale x 4 x i32> undef, <vscale x 4 x i32>* %ptr, iXLen %vl)
+  %x = call <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32.iXLen(<vscale x 4 x i32> undef, ptr %ptr, iXLen %vl)
   ret <vscale x 4 x i32> %x
 }
 
@@ -120,7 +120,7 @@ define <vscale x 4 x i32> @redundant_vsetvli(iXLen %avl, <vscale x 4 x i32>* %pt
 ; operation
 ; FIXME: We don't catch the second vsetvli because it has a use of its output.
 ; We could replace it with the output of the first vsetvli.
-define <vscale x 4 x i32> @repeated_vsetvli(iXLen %avl, <vscale x 4 x i32>* %ptr) nounwind {
+define <vscale x 4 x i32> @repeated_vsetvli(iXLen %avl, ptr %ptr) nounwind {
 ; CHECK-LABEL: repeated_vsetvli:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, a0, e32, m2, ta, ma
@@ -129,7 +129,7 @@ define <vscale x 4 x i32> @repeated_vsetvli(iXLen %avl, <vscale x 4 x i32>* %ptr
 ; CHECK-NEXT:    ret
   %vl0 = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %avl, iXLen 2, iXLen 1)
   %vl1 = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %vl0, iXLen 2, iXLen 1)
-  %x = call <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32.iXLen(<vscale x 4 x i32> undef, <vscale x 4 x i32>* %ptr, iXLen %vl1)
+  %x = call <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32.iXLen(<vscale x 4 x i32> undef, ptr %ptr, iXLen %vl1)
   ret <vscale x 4 x i32> %x
 }
 

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-regression.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-regression.ll
index bb49b9fd65d0f..f658a2c6b24a6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-regression.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-regression.ll
@@ -6,7 +6,7 @@
 ; update of VL was the vsetvli with e64/m4. Changing VTYPE here changes VLMAX
 ; which may make the original VL invalid. Instead of preserving it we use 0.
 
-define i32 @illegal_preserve_vl(<vscale x 2 x i32> %a, <vscale x 4 x i64> %x, <vscale x 4 x i64>* %y) {
+define i32 @illegal_preserve_vl(<vscale x 2 x i32> %a, <vscale x 4 x i64> %x, ptr %y) {
 ; CHECK-LABEL: illegal_preserve_vl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
@@ -16,7 +16,7 @@ define i32 @illegal_preserve_vl(<vscale x 2 x i32> %a, <vscale x 4 x i64> %x, <v
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
   %index = add <vscale x 4 x i64> %x, %x
-  store <vscale x 4 x i64> %index, <vscale x 4 x i64>* %y
+  store <vscale x 4 x i64> %index, ptr %y
   %elt = extractelement <vscale x 2 x i32> %a, i64 0
   ret i32 %elt
 }

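Worth noting in this store-based test: with typed pointers the element type was written on both the value and the pointer operand, while with opaque pointers the access type of the store is carried entirely by the value operand. A minimal sketch with placeholder names (@store_example, %index, %y):

  define void @store_example(<vscale x 4 x i64> %index, ptr %y) {
    ; the stored type <vscale x 4 x i64> alone determines the width of the
    ; access; the pointer operand no longer restates it
    store <vscale x 4 x i64> %index, ptr %y
    ret void
  }
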
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsm.ll b/llvm/test/CodeGen/RISCV/rvv/vsm.ll
index d3c7fc13a1f39..1f5341e2a332a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsm.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsm.ll
@@ -4,94 +4,94 @@
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
 ; RUN:   -verify-machineinstrs | FileCheck %s
 
-declare void @llvm.riscv.vsm.nxv1i1(<vscale x 1 x i1>, <vscale x 1 x i1>*, iXLen);
+declare void @llvm.riscv.vsm.nxv1i1(<vscale x 1 x i1>, ptr, iXLen);
 
-define void @intrinsic_vsm_v_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1>* %1, iXLen %2) nounwind {
+define void @intrinsic_vsm_v_nxv1i1(<vscale x 1 x i1> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsm_v_nxv1i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsm.v v0, (a0)
 ; CHECK-NEXT:    ret
 entry:
-  call void @llvm.riscv.vsm.nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1>* %1, iXLen %2)
+  call void @llvm.riscv.vsm.nxv1i1(<vscale x 1 x i1> %0, ptr %1, iXLen %2)
   ret void
 }
 
-declare void @llvm.riscv.vsm.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>*, iXLen);
+declare void @llvm.riscv.vsm.nxv2i1(<vscale x 2 x i1>, ptr, iXLen);
 
-define void @intrinsic_vsm_v_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1>* %1, iXLen %2) nounwind {
+define void @intrinsic_vsm_v_nxv2i1(<vscale x 2 x i1> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsm_v_nxv2i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsm.v v0, (a0)
 ; CHECK-NEXT:    ret
 entry:
-  call void @llvm.riscv.vsm.nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1>* %1, iXLen %2)
+  call void @llvm.riscv.vsm.nxv2i1(<vscale x 2 x i1> %0, ptr %1, iXLen %2)
   ret void
 }
 
-declare void @llvm.riscv.vsm.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>*, iXLen);
+declare void @llvm.riscv.vsm.nxv4i1(<vscale x 4 x i1>, ptr, iXLen);
 
-define void @intrinsic_vsm_v_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1>* %1, iXLen %2) nounwind {
+define void @intrinsic_vsm_v_nxv4i1(<vscale x 4 x i1> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsm_v_nxv4i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsm.v v0, (a0)
 ; CHECK-NEXT:    ret
 entry:
-  call void @llvm.riscv.vsm.nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1>* %1, iXLen %2)
+  call void @llvm.riscv.vsm.nxv4i1(<vscale x 4 x i1> %0, ptr %1, iXLen %2)
   ret void
 }
 
-declare void @llvm.riscv.vsm.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>*, iXLen);
+declare void @llvm.riscv.vsm.nxv8i1(<vscale x 8 x i1>, ptr, iXLen);
 
-define void @intrinsic_vsm_v_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1>* %1, iXLen %2) nounwind {
+define void @intrinsic_vsm_v_nxv8i1(<vscale x 8 x i1> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsm_v_nxv8i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsm.v v0, (a0)
 ; CHECK-NEXT:    ret
 entry:
-  call void @llvm.riscv.vsm.nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1>* %1, iXLen %2)
+  call void @llvm.riscv.vsm.nxv8i1(<vscale x 8 x i1> %0, ptr %1, iXLen %2)
   ret void
 }
 
-declare void @llvm.riscv.vsm.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>*, iXLen);
+declare void @llvm.riscv.vsm.nxv16i1(<vscale x 16 x i1>, ptr, iXLen);
 
-define void @intrinsic_vsm_v_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1>* %1, iXLen %2) nounwind {
+define void @intrinsic_vsm_v_nxv16i1(<vscale x 16 x i1> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsm_v_nxv16i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vsm.v v0, (a0)
 ; CHECK-NEXT:    ret
 entry:
-  call void @llvm.riscv.vsm.nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1>* %1, iXLen %2)
+  call void @llvm.riscv.vsm.nxv16i1(<vscale x 16 x i1> %0, ptr %1, iXLen %2)
   ret void
 }
 
-declare void @llvm.riscv.vsm.nxv32i1(<vscale x 32 x i1>, <vscale x 32 x i1>*, iXLen);
+declare void @llvm.riscv.vsm.nxv32i1(<vscale x 32 x i1>, ptr, iXLen);
 
-define void @intrinsic_vsm_v_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1>* %1, iXLen %2) nounwind {
+define void @intrinsic_vsm_v_nxv32i1(<vscale x 32 x i1> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsm_v_nxv32i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    vsm.v v0, (a0)
 ; CHECK-NEXT:    ret
 entry:
-  call void @llvm.riscv.vsm.nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1>* %1, iXLen %2)
+  call void @llvm.riscv.vsm.nxv32i1(<vscale x 32 x i1> %0, ptr %1, iXLen %2)
   ret void
 }
 
-declare void @llvm.riscv.vsm.nxv64i1(<vscale x 64 x i1>, <vscale x 64 x i1>*, iXLen);
+declare void @llvm.riscv.vsm.nxv64i1(<vscale x 64 x i1>, ptr, iXLen);
 
-define void @intrinsic_vsm_v_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1>* %1, iXLen %2) nounwind {
+define void @intrinsic_vsm_v_nxv64i1(<vscale x 64 x i1> %0, ptr %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsm_v_nxv64i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT:    vsm.v v0, (a0)
 ; CHECK-NEXT:    ret
 entry:
-  call void @llvm.riscv.vsm.nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1>* %1, iXLen %2)
+  call void @llvm.riscv.vsm.nxv64i1(<vscale x 64 x i1> %0, ptr %1, iXLen %2)
   ret void
 }
 
@@ -101,7 +101,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
   iXLen);
 
 ; Make sure we can use the vsetvli from the producing instruction.
-define void @test_vsetvli_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1>* %2, iXLen %3) nounwind {
+define void @test_vsetvli_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, ptr %2, iXLen %3) nounwind {
 ; CHECK-LABEL: test_vsetvli_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -113,7 +113,7 @@ entry:
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
     iXLen %3)
-  call void @llvm.riscv.vsm.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1>* %2, iXLen %3)
+  call void @llvm.riscv.vsm.nxv1i1(<vscale x 1 x i1> %a, ptr %2, iXLen %3)
   ret void
 }
 
@@ -122,7 +122,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
   <vscale x 1 x i32>,
   iXLen);
 
-define void @test_vsetvli_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1>* %2, iXLen %3) nounwind {
+define void @test_vsetvli_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, ptr %2, iXLen %3) nounwind {
 ; CHECK-LABEL: test_vsetvli_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -134,6 +134,6 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
     iXLen %3)
-  call void @llvm.riscv.vsm.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1>* %2, iXLen %3)
+  call void @llvm.riscv.vsm.nxv1i1(<vscale x 1 x i1> %a, ptr %2, iXLen %3)
   ret void
 }

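As the RUN lines at the top of this file show, iXLen is a placeholder that sed instantiates before llc parses the test. For the riscv64 invocation the converted vsm declaration and a caller therefore read as below (a 32-bit run would substitute i32; the wrapper @store_mask and its value names are illustrative):

  declare void @llvm.riscv.vsm.nxv1i1(<vscale x 1 x i1>, ptr, i64)

  define void @store_mask(<vscale x 1 x i1> %m, ptr %p, i64 %vl) {
    ; same signature as the declarations above, with the address now an opaque ptr
    call void @llvm.riscv.vsm.nxv1i1(<vscale x 1 x i1> %m, ptr %p, i64 %vl)
    ret void
  }
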
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv64.ll
index 3819c5fce4a0a..168d71dab92d9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv64.ll
@@ -6,11 +6,11 @@
 
 declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i64(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   <vscale x 1 x i64>,
   i64);
 
-define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -19,7 +19,7 @@ define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vsc
 entry:
   call void @llvm.riscv.vsoxei.nxv1i8.nxv1i64(
     <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
+    ptr %1,
     <vscale x 1 x i64> %2,
     i64 %3)
 
@@ -28,12 +28,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
   i64);
 
-define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -42,7 +42,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0,
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64(
     <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
+    ptr %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
     i64 %4)
@@ -52,11 +52,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i64(
   <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
+  ptr,
   <vscale x 2 x i64>,
   i64);
 
-define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -65,7 +65,7 @@ define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, <vsc
 entry:
   call void @llvm.riscv.vsoxei.nxv2i8.nxv2i64(
     <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
+    ptr %1,
     <vscale x 2 x i64> %2,
     i64 %3)
 
@@ -74,12 +74,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64(
   <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
+  ptr,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
   i64);
 
-define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -88,7 +88,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0,
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64(
     <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
+    ptr %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
     i64 %4)
@@ -98,11 +98,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i64(
   <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
+  ptr,
   <vscale x 4 x i64>,
   i64);
 
-define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -111,7 +111,7 @@ define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, <vsc
 entry:
   call void @llvm.riscv.vsoxei.nxv4i8.nxv4i64(
     <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
+    ptr %1,
     <vscale x 4 x i64> %2,
     i64 %3)
 
@@ -120,12 +120,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64(
   <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
+  ptr,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
   i64);
 
-define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -134,7 +134,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0,
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64(
     <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
+    ptr %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
     i64 %4)
@@ -144,11 +144,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i64(
   <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
+  ptr,
   <vscale x 8 x i64>,
   i64);
 
-define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -157,7 +157,7 @@ define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, <vsc
 entry:
   call void @llvm.riscv.vsoxei.nxv8i8.nxv8i64(
     <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
+    ptr %1,
     <vscale x 8 x i64> %2,
     i64 %3)
 
@@ -166,12 +166,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64(
   <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
+  ptr,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
   i64);
 
-define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -180,7 +180,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0,
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64(
     <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
+    ptr %1,
     <vscale x 8 x i64> %2,
     <vscale x 8 x i1> %3,
     i64 %4)
@@ -190,11 +190,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i64(
   <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
+  ptr,
   <vscale x 1 x i64>,
   i64);
 
-define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -203,7 +203,7 @@ define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, <
 entry:
   call void @llvm.riscv.vsoxei.nxv1i16.nxv1i64(
     <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
+    ptr %1,
     <vscale x 1 x i64> %2,
     i64 %3)
 
@@ -212,12 +212,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64(
   <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
+  ptr,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
   i64);
 
-define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -226,7 +226,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64(
     <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
+    ptr %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
     i64 %4)
@@ -236,11 +236,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i64(
   <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
+  ptr,
   <vscale x 2 x i64>,
   i64);
 
-define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -249,7 +249,7 @@ define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, <
 entry:
   call void @llvm.riscv.vsoxei.nxv2i16.nxv2i64(
     <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
+    ptr %1,
     <vscale x 2 x i64> %2,
     i64 %3)
 
@@ -258,12 +258,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64(
   <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
+  ptr,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
   i64);
 
-define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -272,7 +272,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64(
     <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
+    ptr %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
     i64 %4)
@@ -282,11 +282,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i64(
   <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
+  ptr,
   <vscale x 4 x i64>,
   i64);
 
-define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -295,7 +295,7 @@ define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, <
 entry:
   call void @llvm.riscv.vsoxei.nxv4i16.nxv4i64(
     <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
+    ptr %1,
     <vscale x 4 x i64> %2,
     i64 %3)
 
@@ -304,12 +304,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64(
   <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
+  ptr,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
   i64);
 
-define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -318,7 +318,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64(
     <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
+    ptr %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
     i64 %4)
@@ -328,11 +328,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i64(
   <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
+  ptr,
   <vscale x 8 x i64>,
   i64);
 
-define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -341,7 +341,7 @@ define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, <
 entry:
   call void @llvm.riscv.vsoxei.nxv8i16.nxv8i64(
     <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
+    ptr %1,
     <vscale x 8 x i64> %2,
     i64 %3)
 
@@ -350,12 +350,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64(
   <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
+  ptr,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
   i64);
 
-define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -364,7 +364,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64(
     <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
+    ptr %1,
     <vscale x 8 x i64> %2,
     <vscale x 8 x i1> %3,
     i64 %4)
@@ -374,11 +374,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i64(
   <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
+  ptr,
   <vscale x 1 x i64>,
   i64);
 
-define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -387,7 +387,7 @@ define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <
 entry:
   call void @llvm.riscv.vsoxei.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
+    ptr %1,
     <vscale x 1 x i64> %2,
     i64 %3)
 
@@ -396,12 +396,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64(
   <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
+  ptr,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
   i64);
 
-define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -410,7 +410,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
+    ptr %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
     i64 %4)
@@ -420,11 +420,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i64(
   <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
+  ptr,
   <vscale x 2 x i64>,
   i64);
 
-define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -433,7 +433,7 @@ define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <
 entry:
   call void @llvm.riscv.vsoxei.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
+    ptr %1,
     <vscale x 2 x i64> %2,
     i64 %3)
 
@@ -442,12 +442,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64(
   <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
+  ptr,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
   i64);
 
-define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -456,7 +456,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
+    ptr %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
     i64 %4)
@@ -466,11 +466,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i64(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   <vscale x 4 x i64>,
   i64);
 
-define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -479,7 +479,7 @@ define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <
 entry:
   call void @llvm.riscv.vsoxei.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
+    ptr %1,
     <vscale x 4 x i64> %2,
     i64 %3)
 
@@ -488,12 +488,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
   i64);
 
-define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -502,7 +502,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
+    ptr %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
     i64 %4)
@@ -512,11 +512,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i64(
   <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
+  ptr,
   <vscale x 8 x i64>,
   i64);
 
-define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -525,7 +525,7 @@ define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <
 entry:
   call void @llvm.riscv.vsoxei.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
+    ptr %1,
     <vscale x 8 x i64> %2,
     i64 %3)
 
@@ -534,12 +534,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64(
   <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
+  ptr,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
   i64);
 
-define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -548,7 +548,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
+    ptr %1,
     <vscale x 8 x i64> %2,
     <vscale x 8 x i1> %3,
     i64 %4)
@@ -558,11 +558,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   <vscale x 1 x i64>,
   i64);
 
-define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -571,7 +571,7 @@ define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <
 entry:
   call void @llvm.riscv.vsoxei.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
+    ptr %1,
     <vscale x 1 x i64> %2,
     i64 %3)
 
@@ -580,12 +580,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
   i64);
 
-define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -594,7 +594,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
+    ptr %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
     i64 %4)
@@ -604,11 +604,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
+  ptr,
   <vscale x 2 x i64>,
   i64);
 
-define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -617,7 +617,7 @@ define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <
 entry:
   call void @llvm.riscv.vsoxei.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
+    ptr %1,
     <vscale x 2 x i64> %2,
     i64 %3)
 
@@ -626,12 +626,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
+  ptr,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
   i64);
 
-define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -640,7 +640,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
+    ptr %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
     i64 %4)
@@ -650,11 +650,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
+  ptr,
   <vscale x 4 x i64>,
   i64);
 
-define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -663,7 +663,7 @@ define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <
 entry:
   call void @llvm.riscv.vsoxei.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
+    ptr %1,
     <vscale x 4 x i64> %2,
     i64 %3)
 
@@ -672,12 +672,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
+  ptr,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
   i64);
 
-define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -686,7 +686,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
+    ptr %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
     i64 %4)
@@ -696,11 +696,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i64(
   <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
+  ptr,
   <vscale x 8 x i64>,
   i64);
 
-define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -709,7 +709,7 @@ define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <
 entry:
   call void @llvm.riscv.vsoxei.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
+    ptr %1,
     <vscale x 8 x i64> %2,
     i64 %3)
 
@@ -718,12 +718,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64(
   <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
+  ptr,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
   i64);
 
-define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -732,7 +732,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
+    ptr %1,
     <vscale x 8 x i64> %2,
     <vscale x 8 x i1> %3,
     i64 %4)
@@ -742,11 +742,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i64(
   <vscale x 1 x half>,
-  <vscale x 1 x half>*,
+  ptr,
   <vscale x 1 x i64>,
   i64);
 
-define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -755,7 +755,7 @@ define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0,
 entry:
   call void @llvm.riscv.vsoxei.nxv1f16.nxv1i64(
     <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
+    ptr %1,
     <vscale x 1 x i64> %2,
     i64 %3)
 
@@ -764,12 +764,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i64(
   <vscale x 1 x half>,
-  <vscale x 1 x half>*,
+  ptr,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
   i64);
 
-define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -778,7 +778,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i64(
     <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
+    ptr %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
     i64 %4)
@@ -788,11 +788,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i64(
   <vscale x 2 x half>,
-  <vscale x 2 x half>*,
+  ptr,
   <vscale x 2 x i64>,
   i64);
 
-define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -801,7 +801,7 @@ define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0,
 entry:
   call void @llvm.riscv.vsoxei.nxv2f16.nxv2i64(
     <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
+    ptr %1,
     <vscale x 2 x i64> %2,
     i64 %3)
 
@@ -810,12 +810,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i64(
   <vscale x 2 x half>,
-  <vscale x 2 x half>*,
+  ptr,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
   i64);
 
-define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -824,7 +824,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i64(
     <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
+    ptr %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
     i64 %4)
@@ -834,11 +834,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i64(
   <vscale x 4 x half>,
-  <vscale x 4 x half>*,
+  ptr,
   <vscale x 4 x i64>,
   i64);
 
-define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -847,7 +847,7 @@ define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0,
 entry:
   call void @llvm.riscv.vsoxei.nxv4f16.nxv4i64(
     <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
+    ptr %1,
     <vscale x 4 x i64> %2,
     i64 %3)
 
@@ -856,12 +856,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i64(
   <vscale x 4 x half>,
-  <vscale x 4 x half>*,
+  ptr,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
   i64);
 
-define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -870,7 +870,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i64(
     <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
+    ptr %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
     i64 %4)
@@ -880,11 +880,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i64(
   <vscale x 8 x half>,
-  <vscale x 8 x half>*,
+  ptr,
   <vscale x 8 x i64>,
   i64);
 
-define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -893,7 +893,7 @@ define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0,
 entry:
   call void @llvm.riscv.vsoxei.nxv8f16.nxv8i64(
     <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
+    ptr %1,
     <vscale x 8 x i64> %2,
     i64 %3)
 
@@ -902,12 +902,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i64(
   <vscale x 8 x half>,
-  <vscale x 8 x half>*,
+  ptr,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
   i64);
 
-define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -916,7 +916,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i64(
     <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
+    ptr %1,
     <vscale x 8 x i64> %2,
     <vscale x 8 x i1> %3,
     i64 %4)
@@ -926,11 +926,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i64(
   <vscale x 1 x float>,
-  <vscale x 1 x float>*,
+  ptr,
   <vscale x 1 x i64>,
   i64);
 
-define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -939,7 +939,7 @@ define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0,
 entry:
   call void @llvm.riscv.vsoxei.nxv1f32.nxv1i64(
     <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
+    ptr %1,
     <vscale x 1 x i64> %2,
     i64 %3)
 
@@ -948,12 +948,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i64(
   <vscale x 1 x float>,
-  <vscale x 1 x float>*,
+  ptr,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
   i64);
 
-define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -962,7 +962,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i64(
     <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
+    ptr %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
     i64 %4)
@@ -972,11 +972,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i64(
   <vscale x 2 x float>,
-  <vscale x 2 x float>*,
+  ptr,
   <vscale x 2 x i64>,
   i64);
 
-define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -985,7 +985,7 @@ define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0,
 entry:
   call void @llvm.riscv.vsoxei.nxv2f32.nxv2i64(
     <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
+    ptr %1,
     <vscale x 2 x i64> %2,
     i64 %3)
 
@@ -994,12 +994,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i64(
   <vscale x 2 x float>,
-  <vscale x 2 x float>*,
+  ptr,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
   i64);
 
-define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1008,7 +1008,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i64(
     <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
+    ptr %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
     i64 %4)
@@ -1018,11 +1018,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i64(
   <vscale x 4 x float>,
-  <vscale x 4 x float>*,
+  ptr,
   <vscale x 4 x i64>,
   i64);
 
-define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1031,7 +1031,7 @@ define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0,
 entry:
   call void @llvm.riscv.vsoxei.nxv4f32.nxv4i64(
     <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
+    ptr %1,
     <vscale x 4 x i64> %2,
     i64 %3)
 
@@ -1040,12 +1040,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i64(
   <vscale x 4 x float>,
-  <vscale x 4 x float>*,
+  ptr,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
   i64);
 
-define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1054,7 +1054,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i64(
     <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
+    ptr %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
     i64 %4)
@@ -1064,11 +1064,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i64(
   <vscale x 8 x float>,
-  <vscale x 8 x float>*,
+  ptr,
   <vscale x 8 x i64>,
   i64);
 
-define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1077,7 +1077,7 @@ define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0,
 entry:
   call void @llvm.riscv.vsoxei.nxv8f32.nxv8i64(
     <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
+    ptr %1,
     <vscale x 8 x i64> %2,
     i64 %3)
 
@@ -1086,12 +1086,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i64(
   <vscale x 8 x float>,
-  <vscale x 8 x float>*,
+  ptr,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
   i64);
 
-define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1100,7 +1100,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i64(
     <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
+    ptr %1,
     <vscale x 8 x i64> %2,
     <vscale x 8 x i1> %3,
     i64 %4)
@@ -1110,11 +1110,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i64(
   <vscale x 1 x double>,
-  <vscale x 1 x double>*,
+  ptr,
   <vscale x 1 x i64>,
   i64);
 
-define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -1123,7 +1123,7 @@ define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0
 entry:
   call void @llvm.riscv.vsoxei.nxv1f64.nxv1i64(
     <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
+    ptr %1,
     <vscale x 1 x i64> %2,
     i64 %3)
 
@@ -1132,12 +1132,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i64(
   <vscale x 1 x double>,
-  <vscale x 1 x double>*,
+  ptr,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
   i64);
 
-define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -1146,7 +1146,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x doubl
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i64(
     <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
+    ptr %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
     i64 %4)
@@ -1156,11 +1156,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i64(
   <vscale x 2 x double>,
-  <vscale x 2 x double>*,
+  ptr,
   <vscale x 2 x i64>,
   i64);
 
-define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -1169,7 +1169,7 @@ define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0
 entry:
   call void @llvm.riscv.vsoxei.nxv2f64.nxv2i64(
     <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
+    ptr %1,
     <vscale x 2 x i64> %2,
     i64 %3)
 
@@ -1178,12 +1178,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i64(
   <vscale x 2 x double>,
-  <vscale x 2 x double>*,
+  ptr,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
   i64);
 
-define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -1192,7 +1192,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x doubl
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i64(
     <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
+    ptr %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
     i64 %4)
@@ -1202,11 +1202,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i64(
   <vscale x 4 x double>,
-  <vscale x 4 x double>*,
+  ptr,
   <vscale x 4 x i64>,
   i64);
 
-define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -1215,7 +1215,7 @@ define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0
 entry:
   call void @llvm.riscv.vsoxei.nxv4f64.nxv4i64(
     <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
+    ptr %1,
     <vscale x 4 x i64> %2,
     i64 %3)
 
@@ -1224,12 +1224,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i64(
   <vscale x 4 x double>,
-  <vscale x 4 x double>*,
+  ptr,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
   i64);
 
-define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -1238,7 +1238,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x doubl
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i64(
     <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
+    ptr %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
     i64 %4)
@@ -1248,11 +1248,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i64(
   <vscale x 8 x double>,
-  <vscale x 8 x double>*,
+  ptr,
   <vscale x 8 x i64>,
   i64);
 
-define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -1261,7 +1261,7 @@ define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0
 entry:
   call void @llvm.riscv.vsoxei.nxv8f64.nxv8i64(
     <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
+    ptr %1,
     <vscale x 8 x i64> %2,
     i64 %3)
 
@@ -1270,12 +1270,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i64(
   <vscale x 8 x double>,
-  <vscale x 8 x double>*,
+  ptr,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
   i64);
 
-define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -1284,7 +1284,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x doubl
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i64(
     <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
+    ptr %1,
     <vscale x 8 x i64> %2,
     <vscale x 8 x i1> %3,
     i64 %4)

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsoxei.ll b/llvm/test/CodeGen/RISCV/rvv/vsoxei.ll
index 07ed1ce0c5a63..bcb00242741cb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsoxei.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsoxei.ll
@@ -6,11 +6,11 @@
 
 declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i32(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   <vscale x 1 x i32>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -19,7 +19,7 @@ define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vsc
 entry:
   call void @llvm.riscv.vsoxei.nxv1i8.nxv1i32(
     <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
+    ptr %1,
     <vscale x 1 x i32> %2,
     iXLen %3)
 
@@ -28,12 +28,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i32(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -42,7 +42,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0,
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i32(
     <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
+    ptr %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -52,11 +52,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i32(
   <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
+  ptr,
   <vscale x 2 x i32>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -65,7 +65,7 @@ define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vsc
 entry:
   call void @llvm.riscv.vsoxei.nxv2i8.nxv2i32(
     <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
+    ptr %1,
     <vscale x 2 x i32> %2,
     iXLen %3)
 
@@ -74,12 +74,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i32(
   <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
+  ptr,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -88,7 +88,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0,
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i32(
     <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
+    ptr %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -98,11 +98,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i32(
   <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
+  ptr,
   <vscale x 4 x i32>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -111,7 +111,7 @@ define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vsc
 entry:
   call void @llvm.riscv.vsoxei.nxv4i8.nxv4i32(
     <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
+    ptr %1,
     <vscale x 4 x i32> %2,
     iXLen %3)
 
@@ -120,12 +120,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i32(
   <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
+  ptr,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -134,7 +134,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0,
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i32(
     <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
+    ptr %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -144,11 +144,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i32(
   <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
+  ptr,
   <vscale x 8 x i32>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -157,7 +157,7 @@ define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vsc
 entry:
   call void @llvm.riscv.vsoxei.nxv8i8.nxv8i32(
     <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
+    ptr %1,
     <vscale x 8 x i32> %2,
     iXLen %3)
 
@@ -166,12 +166,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i32(
   <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
+  ptr,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -180,7 +180,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0,
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i32(
     <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
+    ptr %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
     iXLen %4)
@@ -190,11 +190,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv16i8.nxv16i32(
   <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
+  ptr,
   <vscale x 16 x i32>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -203,7 +203,7 @@ define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0,
 entry:
   call void @llvm.riscv.vsoxei.nxv16i8.nxv16i32(
     <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
+    ptr %1,
     <vscale x 16 x i32> %2,
     iXLen %3)
 
@@ -212,12 +212,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i32(
   <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
+  ptr,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -226,7 +226,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i32(
     <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
+    ptr %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
     iXLen %4)
@@ -236,11 +236,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i32(
   <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
+  ptr,
   <vscale x 1 x i32>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -249,7 +249,7 @@ define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <
 entry:
   call void @llvm.riscv.vsoxei.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
+    ptr %1,
     <vscale x 1 x i32> %2,
     iXLen %3)
 
@@ -258,12 +258,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i32(
   <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
+  ptr,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -272,7 +272,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
+    ptr %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -282,11 +282,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i32(
   <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
+  ptr,
   <vscale x 2 x i32>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -295,7 +295,7 @@ define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <
 entry:
   call void @llvm.riscv.vsoxei.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
+    ptr %1,
     <vscale x 2 x i32> %2,
     iXLen %3)
 
@@ -304,12 +304,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i32(
   <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
+  ptr,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -318,7 +318,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
+    ptr %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -328,11 +328,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i32(
   <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
+  ptr,
   <vscale x 4 x i32>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -341,7 +341,7 @@ define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <
 entry:
   call void @llvm.riscv.vsoxei.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
+    ptr %1,
     <vscale x 4 x i32> %2,
     iXLen %3)
 
@@ -350,12 +350,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i32(
   <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
+  ptr,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -364,7 +364,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
+    ptr %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -374,11 +374,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i32(
   <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
+  ptr,
   <vscale x 8 x i32>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -387,7 +387,7 @@ define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <
 entry:
   call void @llvm.riscv.vsoxei.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
+    ptr %1,
     <vscale x 8 x i32> %2,
     iXLen %3)
 
@@ -396,12 +396,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i32(
   <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
+  ptr,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -410,7 +410,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
+    ptr %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
     iXLen %4)
@@ -420,11 +420,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv16i16.nxv16i32(
   <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
+  ptr,
   <vscale x 16 x i32>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -433,7 +433,7 @@ define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %
 entry:
   call void @llvm.riscv.vsoxei.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
+    ptr %1,
     <vscale x 16 x i32> %2,
     iXLen %3)
 
@@ -442,12 +442,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i32(
   <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
+  ptr,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -456,7 +456,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
+    ptr %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
     iXLen %4)
@@ -466,11 +466,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
+  ptr,
   <vscale x 1 x i32>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -479,7 +479,7 @@ define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <
 entry:
   call void @llvm.riscv.vsoxei.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
+    ptr %1,
     <vscale x 1 x i32> %2,
     iXLen %3)
 
@@ -488,12 +488,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
+  ptr,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -502,7 +502,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
+    ptr %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -512,11 +512,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
+  ptr,
   <vscale x 2 x i32>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -525,7 +525,7 @@ define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <
 entry:
   call void @llvm.riscv.vsoxei.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
+    ptr %1,
     <vscale x 2 x i32> %2,
     iXLen %3)
 
@@ -534,12 +534,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
+  ptr,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -548,7 +548,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
+    ptr %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -558,11 +558,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   <vscale x 4 x i32>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -571,7 +571,7 @@ define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <
 entry:
   call void @llvm.riscv.vsoxei.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
+    ptr %1,
     <vscale x 4 x i32> %2,
     iXLen %3)
 
@@ -580,12 +580,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -594,7 +594,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
+    ptr %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -604,11 +604,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
+  ptr,
   <vscale x 8 x i32>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -617,7 +617,7 @@ define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <
 entry:
   call void @llvm.riscv.vsoxei.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
+    ptr %1,
     <vscale x 8 x i32> %2,
     iXLen %3)
 
@@ -626,12 +626,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
+  ptr,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -640,7 +640,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
+    ptr %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
     iXLen %4)
@@ -650,11 +650,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
+  ptr,
   <vscale x 16 x i32>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -663,7 +663,7 @@ define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %
 entry:
   call void @llvm.riscv.vsoxei.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
+    ptr %1,
     <vscale x 16 x i32> %2,
     iXLen %3)
 
@@ -672,12 +672,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
+  ptr,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -686,7 +686,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
+    ptr %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
     iXLen %4)
@@ -696,11 +696,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i32(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   <vscale x 1 x i32>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -709,7 +709,7 @@ define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <
 entry:
   call void @llvm.riscv.vsoxei.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
+    ptr %1,
     <vscale x 1 x i32> %2,
     iXLen %3)
 
@@ -718,12 +718,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i32(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -732,7 +732,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
+    ptr %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -742,11 +742,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i32(
   <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
+  ptr,
   <vscale x 2 x i32>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -755,7 +755,7 @@ define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <
 entry:
   call void @llvm.riscv.vsoxei.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
+    ptr %1,
     <vscale x 2 x i32> %2,
     iXLen %3)
 
@@ -764,12 +764,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i32(
   <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
+  ptr,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -778,7 +778,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
+    ptr %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -788,11 +788,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i32(
   <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
+  ptr,
   <vscale x 4 x i32>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -801,7 +801,7 @@ define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <
 entry:
   call void @llvm.riscv.vsoxei.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
+    ptr %1,
     <vscale x 4 x i32> %2,
     iXLen %3)
 
@@ -810,12 +810,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i32(
   <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
+  ptr,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -824,7 +824,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
+    ptr %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -834,11 +834,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i32(
   <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
+  ptr,
   <vscale x 8 x i32>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -847,7 +847,7 @@ define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <
 entry:
   call void @llvm.riscv.vsoxei.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
+    ptr %1,
     <vscale x 8 x i32> %2,
     iXLen %3)
 
@@ -856,12 +856,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i32(
   <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
+  ptr,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -870,7 +870,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
+    ptr %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
     iXLen %4)
@@ -880,11 +880,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i32(
   <vscale x 1 x half>,
-  <vscale x 1 x half>*,
+  ptr,
   <vscale x 1 x i32>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -893,7 +893,7 @@ define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0,
 entry:
   call void @llvm.riscv.vsoxei.nxv1f16.nxv1i32(
     <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
+    ptr %1,
     <vscale x 1 x i32> %2,
     iXLen %3)
 
@@ -902,12 +902,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i32(
   <vscale x 1 x half>,
-  <vscale x 1 x half>*,
+  ptr,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -916,7 +916,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i32(
     <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
+    ptr %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -926,11 +926,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i32(
   <vscale x 2 x half>,
-  <vscale x 2 x half>*,
+  ptr,
   <vscale x 2 x i32>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -939,7 +939,7 @@ define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0,
 entry:
   call void @llvm.riscv.vsoxei.nxv2f16.nxv2i32(
     <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
+    ptr %1,
     <vscale x 2 x i32> %2,
     iXLen %3)
 
@@ -948,12 +948,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i32(
   <vscale x 2 x half>,
-  <vscale x 2 x half>*,
+  ptr,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -962,7 +962,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i32(
     <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
+    ptr %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -972,11 +972,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i32(
   <vscale x 4 x half>,
-  <vscale x 4 x half>*,
+  ptr,
   <vscale x 4 x i32>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -985,7 +985,7 @@ define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0,
 entry:
   call void @llvm.riscv.vsoxei.nxv4f16.nxv4i32(
     <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
+    ptr %1,
     <vscale x 4 x i32> %2,
     iXLen %3)
 
@@ -994,12 +994,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i32(
   <vscale x 4 x half>,
-  <vscale x 4 x half>*,
+  ptr,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1008,7 +1008,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i32(
     <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
+    ptr %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -1018,11 +1018,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i32(
   <vscale x 8 x half>,
-  <vscale x 8 x half>*,
+  ptr,
   <vscale x 8 x i32>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1031,7 +1031,7 @@ define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0,
 entry:
   call void @llvm.riscv.vsoxei.nxv8f16.nxv8i32(
     <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
+    ptr %1,
     <vscale x 8 x i32> %2,
     iXLen %3)
 
@@ -1040,12 +1040,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i32(
   <vscale x 8 x half>,
-  <vscale x 8 x half>*,
+  ptr,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1054,7 +1054,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i32(
     <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
+    ptr %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
     iXLen %4)
@@ -1064,11 +1064,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv16f16.nxv16i32(
   <vscale x 16 x half>,
-  <vscale x 16 x half>*,
+  ptr,
   <vscale x 16 x i32>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1077,7 +1077,7 @@ define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half>
 entry:
   call void @llvm.riscv.vsoxei.nxv16f16.nxv16i32(
     <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
+    ptr %1,
     <vscale x 16 x i32> %2,
     iXLen %3)
 
@@ -1086,12 +1086,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i32(
   <vscale x 16 x half>,
-  <vscale x 16 x half>*,
+  ptr,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1100,7 +1100,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x h
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i32(
     <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
+    ptr %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
     iXLen %4)
@@ -1110,11 +1110,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i32(
   <vscale x 1 x float>,
-  <vscale x 1 x float>*,
+  ptr,
   <vscale x 1 x i32>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1123,7 +1123,7 @@ define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0,
 entry:
   call void @llvm.riscv.vsoxei.nxv1f32.nxv1i32(
     <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
+    ptr %1,
     <vscale x 1 x i32> %2,
     iXLen %3)
 
@@ -1132,12 +1132,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i32(
   <vscale x 1 x float>,
-  <vscale x 1 x float>*,
+  ptr,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1146,7 +1146,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i32(
     <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
+    ptr %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -1156,11 +1156,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i32(
   <vscale x 2 x float>,
-  <vscale x 2 x float>*,
+  ptr,
   <vscale x 2 x i32>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1169,7 +1169,7 @@ define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0,
 entry:
   call void @llvm.riscv.vsoxei.nxv2f32.nxv2i32(
     <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
+    ptr %1,
     <vscale x 2 x i32> %2,
     iXLen %3)
 
@@ -1178,12 +1178,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i32(
   <vscale x 2 x float>,
-  <vscale x 2 x float>*,
+  ptr,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1192,7 +1192,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i32(
     <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
+    ptr %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -1202,11 +1202,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i32(
   <vscale x 4 x float>,
-  <vscale x 4 x float>*,
+  ptr,
   <vscale x 4 x i32>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1215,7 +1215,7 @@ define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0,
 entry:
   call void @llvm.riscv.vsoxei.nxv4f32.nxv4i32(
     <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
+    ptr %1,
     <vscale x 4 x i32> %2,
     iXLen %3)
 
@@ -1224,12 +1224,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i32(
   <vscale x 4 x float>,
-  <vscale x 4 x float>*,
+  ptr,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1238,7 +1238,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i32(
     <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
+    ptr %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -1248,11 +1248,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i32(
   <vscale x 8 x float>,
-  <vscale x 8 x float>*,
+  ptr,
   <vscale x 8 x i32>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1261,7 +1261,7 @@ define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0,
 entry:
   call void @llvm.riscv.vsoxei.nxv8f32.nxv8i32(
     <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
+    ptr %1,
     <vscale x 8 x i32> %2,
     iXLen %3)
 
@@ -1270,12 +1270,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i32(
   <vscale x 8 x float>,
-  <vscale x 8 x float>*,
+  ptr,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1284,7 +1284,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i32(
     <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
+    ptr %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
     iXLen %4)
@@ -1294,11 +1294,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv16f32.nxv16i32(
   <vscale x 16 x float>,
-  <vscale x 16 x float>*,
+  ptr,
   <vscale x 16 x i32>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -1307,7 +1307,7 @@ define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float>
 entry:
   call void @llvm.riscv.vsoxei.nxv16f32.nxv16i32(
     <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
+    ptr %1,
     <vscale x 16 x i32> %2,
     iXLen %3)
 
@@ -1316,12 +1316,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i32(
   <vscale x 16 x float>,
-  <vscale x 16 x float>*,
+  ptr,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -1330,7 +1330,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x f
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i32(
     <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
+    ptr %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
     iXLen %4)
@@ -1340,11 +1340,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i32(
   <vscale x 1 x double>,
-  <vscale x 1 x double>*,
+  ptr,
   <vscale x 1 x i32>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -1353,7 +1353,7 @@ define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0
 entry:
   call void @llvm.riscv.vsoxei.nxv1f64.nxv1i32(
     <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
+    ptr %1,
     <vscale x 1 x i32> %2,
     iXLen %3)
 
@@ -1362,12 +1362,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i32(
   <vscale x 1 x double>,
-  <vscale x 1 x double>*,
+  ptr,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -1376,7 +1376,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x doubl
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i32(
     <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
+    ptr %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -1386,11 +1386,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i32(
   <vscale x 2 x double>,
-  <vscale x 2 x double>*,
+  ptr,
   <vscale x 2 x i32>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -1399,7 +1399,7 @@ define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0
 entry:
   call void @llvm.riscv.vsoxei.nxv2f64.nxv2i32(
     <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
+    ptr %1,
     <vscale x 2 x i32> %2,
     iXLen %3)
 
@@ -1408,12 +1408,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i32(
   <vscale x 2 x double>,
-  <vscale x 2 x double>*,
+  ptr,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -1422,7 +1422,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x doubl
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i32(
     <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
+    ptr %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -1432,11 +1432,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i32(
   <vscale x 4 x double>,
-  <vscale x 4 x double>*,
+  ptr,
   <vscale x 4 x i32>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -1445,7 +1445,7 @@ define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0
 entry:
   call void @llvm.riscv.vsoxei.nxv4f64.nxv4i32(
     <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
+    ptr %1,
     <vscale x 4 x i32> %2,
     iXLen %3)
 
@@ -1454,12 +1454,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i32(
   <vscale x 4 x double>,
-  <vscale x 4 x double>*,
+  ptr,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -1468,7 +1468,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x doubl
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i32(
     <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
+    ptr %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -1478,11 +1478,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i32(
   <vscale x 8 x double>,
-  <vscale x 8 x double>*,
+  ptr,
   <vscale x 8 x i32>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -1491,7 +1491,7 @@ define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0
 entry:
   call void @llvm.riscv.vsoxei.nxv8f64.nxv8i32(
     <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
+    ptr %1,
     <vscale x 8 x i32> %2,
     iXLen %3)
 
@@ -1500,12 +1500,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i32(
   <vscale x 8 x double>,
-  <vscale x 8 x double>*,
+  ptr,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -1514,7 +1514,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x doubl
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i32(
     <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
+    ptr %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
     iXLen %4)
@@ -1524,11 +1524,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i16(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   <vscale x 1 x i16>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -1537,7 +1537,7 @@ define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vsc
 entry:
   call void @llvm.riscv.vsoxei.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
+    ptr %1,
     <vscale x 1 x i16> %2,
     iXLen %3)
 
@@ -1546,12 +1546,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i16(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -1560,7 +1560,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0,
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
+    ptr %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -1570,11 +1570,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i16(
   <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
+  ptr,
   <vscale x 2 x i16>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -1583,7 +1583,7 @@ define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vsc
 entry:
   call void @llvm.riscv.vsoxei.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
+    ptr %1,
     <vscale x 2 x i16> %2,
     iXLen %3)
 
@@ -1592,12 +1592,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i16(
   <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
+  ptr,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -1606,7 +1606,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0,
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
+    ptr %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -1616,11 +1616,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i16(
   <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
+  ptr,
   <vscale x 4 x i16>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -1629,7 +1629,7 @@ define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vsc
 entry:
   call void @llvm.riscv.vsoxei.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
+    ptr %1,
     <vscale x 4 x i16> %2,
     iXLen %3)
 
@@ -1638,12 +1638,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i16(
   <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
+  ptr,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -1652,7 +1652,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0,
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
+    ptr %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -1662,11 +1662,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i16(
   <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
+  ptr,
   <vscale x 8 x i16>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -1675,7 +1675,7 @@ define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vsc
 entry:
   call void @llvm.riscv.vsoxei.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
+    ptr %1,
     <vscale x 8 x i16> %2,
     iXLen %3)
 
@@ -1684,12 +1684,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i16(
   <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
+  ptr,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -1698,7 +1698,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0,
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
+    ptr %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
     iXLen %4)
@@ -1708,11 +1708,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv16i8.nxv16i16(
   <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
+  ptr,
   <vscale x 16 x i16>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -1721,7 +1721,7 @@ define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0,
 entry:
   call void @llvm.riscv.vsoxei.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
+    ptr %1,
     <vscale x 16 x i16> %2,
     iXLen %3)
 
@@ -1730,12 +1730,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i16(
   <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
+  ptr,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -1744,7 +1744,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
+    ptr %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
     iXLen %4)
@@ -1754,11 +1754,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv32i8.nxv32i16(
   <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
+  ptr,
   <vscale x 32 x i16>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -1767,7 +1767,7 @@ define void @intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0,
 entry:
   call void @llvm.riscv.vsoxei.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
-    <vscale x 32 x i8>* %1,
+    ptr %1,
     <vscale x 32 x i16> %2,
     iXLen %3)
 
@@ -1776,12 +1776,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i16(
   <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
+  ptr,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -1790,7 +1790,7 @@ define void @intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
-    <vscale x 32 x i8>* %1,
+    ptr %1,
     <vscale x 32 x i16> %2,
     <vscale x 32 x i1> %3,
     iXLen %4)
@@ -1800,11 +1800,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
+  ptr,
   <vscale x 1 x i16>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1813,7 +1813,7 @@ define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <
 entry:
   call void @llvm.riscv.vsoxei.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
+    ptr %1,
     <vscale x 1 x i16> %2,
     iXLen %3)
 
@@ -1822,12 +1822,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
+  ptr,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1836,7 +1836,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
+    ptr %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -1846,11 +1846,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
+  ptr,
   <vscale x 2 x i16>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1859,7 +1859,7 @@ define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <
 entry:
   call void @llvm.riscv.vsoxei.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
+    ptr %1,
     <vscale x 2 x i16> %2,
     iXLen %3)
 
@@ -1868,12 +1868,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
+  ptr,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1882,7 +1882,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
+    ptr %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -1892,11 +1892,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
+  ptr,
   <vscale x 4 x i16>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1905,7 +1905,7 @@ define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <
 entry:
   call void @llvm.riscv.vsoxei.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
+    ptr %1,
     <vscale x 4 x i16> %2,
     iXLen %3)
 
@@ -1914,12 +1914,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
+  ptr,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1928,7 +1928,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
+    ptr %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -1938,11 +1938,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
+  ptr,
   <vscale x 8 x i16>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1951,7 +1951,7 @@ define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <
 entry:
   call void @llvm.riscv.vsoxei.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
+    ptr %1,
     <vscale x 8 x i16> %2,
     iXLen %3)
 
@@ -1960,12 +1960,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
+  ptr,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1974,7 +1974,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
+    ptr %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
     iXLen %4)
@@ -1984,11 +1984,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
+  ptr,
   <vscale x 16 x i16>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1997,7 +1997,7 @@ define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %
 entry:
   call void @llvm.riscv.vsoxei.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
+    ptr %1,
     <vscale x 16 x i16> %2,
     iXLen %3)
 
@@ -2006,12 +2006,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
+  ptr,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -2020,7 +2020,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
+    ptr %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
     iXLen %4)
@@ -2030,11 +2030,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
+  ptr,
   <vscale x 32 x i16>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -2043,7 +2043,7 @@ define void @intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %
 entry:
   call void @llvm.riscv.vsoxei.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
-    <vscale x 32 x i16>* %1,
+    ptr %1,
     <vscale x 32 x i16> %2,
     iXLen %3)
 
@@ -2052,12 +2052,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
+  ptr,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -2066,7 +2066,7 @@ define void @intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
-    <vscale x 32 x i16>* %1,
+    ptr %1,
     <vscale x 32 x i16> %2,
     <vscale x 32 x i1> %3,
     iXLen %4)
@@ -2076,11 +2076,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i16(
   <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
+  ptr,
   <vscale x 1 x i16>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -2089,7 +2089,7 @@ define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <
 entry:
   call void @llvm.riscv.vsoxei.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
+    ptr %1,
     <vscale x 1 x i16> %2,
     iXLen %3)
 
@@ -2098,12 +2098,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16(
   <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
+  ptr,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -2112,7 +2112,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
+    ptr %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -2122,11 +2122,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i16(
   <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
+  ptr,
   <vscale x 2 x i16>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -2135,7 +2135,7 @@ define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <
 entry:
   call void @llvm.riscv.vsoxei.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
+    ptr %1,
     <vscale x 2 x i16> %2,
     iXLen %3)
 
@@ -2144,12 +2144,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i16(
   <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
+  ptr,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -2158,7 +2158,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
+    ptr %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -2168,11 +2168,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i16(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   <vscale x 4 x i16>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -2181,7 +2181,7 @@ define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <
 entry:
   call void @llvm.riscv.vsoxei.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
+    ptr %1,
     <vscale x 4 x i16> %2,
     iXLen %3)
 
@@ -2190,12 +2190,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i16(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -2204,7 +2204,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
+    ptr %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -2214,11 +2214,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i16(
   <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
+  ptr,
   <vscale x 8 x i16>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -2227,7 +2227,7 @@ define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <
 entry:
   call void @llvm.riscv.vsoxei.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
+    ptr %1,
     <vscale x 8 x i16> %2,
     iXLen %3)
 
@@ -2236,12 +2236,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i16(
   <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
+  ptr,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -2250,7 +2250,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
+    ptr %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
     iXLen %4)
@@ -2260,11 +2260,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv16i32.nxv16i16(
   <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
+  ptr,
   <vscale x 16 x i16>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -2273,7 +2273,7 @@ define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %
 entry:
   call void @llvm.riscv.vsoxei.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
+    ptr %1,
     <vscale x 16 x i16> %2,
     iXLen %3)
 
@@ -2282,12 +2282,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i16(
   <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
+  ptr,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -2296,7 +2296,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
+    ptr %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
     iXLen %4)
@@ -2306,11 +2306,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i16(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   <vscale x 1 x i16>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -2319,7 +2319,7 @@ define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <
 entry:
   call void @llvm.riscv.vsoxei.nxv1i64.nxv1i16(
     <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
+    ptr %1,
     <vscale x 1 x i16> %2,
     iXLen %3)
 
@@ -2328,12 +2328,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i16(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -2342,7 +2342,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
+    ptr %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -2352,11 +2352,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i16(
   <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
+  ptr,
   <vscale x 2 x i16>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -2365,7 +2365,7 @@ define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <
 entry:
   call void @llvm.riscv.vsoxei.nxv2i64.nxv2i16(
     <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
+    ptr %1,
     <vscale x 2 x i16> %2,
     iXLen %3)
 
@@ -2374,12 +2374,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i16(
   <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
+  ptr,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -2388,7 +2388,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
+    ptr %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -2398,11 +2398,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i16(
   <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
+  ptr,
   <vscale x 4 x i16>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -2411,7 +2411,7 @@ define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <
 entry:
   call void @llvm.riscv.vsoxei.nxv4i64.nxv4i16(
     <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
+    ptr %1,
     <vscale x 4 x i16> %2,
     iXLen %3)
 
@@ -2420,12 +2420,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i16(
   <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
+  ptr,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -2434,7 +2434,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
+    ptr %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -2444,11 +2444,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i16(
   <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
+  ptr,
   <vscale x 8 x i16>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -2457,7 +2457,7 @@ define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <
 entry:
   call void @llvm.riscv.vsoxei.nxv8i64.nxv8i16(
     <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
+    ptr %1,
     <vscale x 8 x i16> %2,
     iXLen %3)
 
@@ -2466,12 +2466,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i16(
   <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
+  ptr,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -2480,7 +2480,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
+    ptr %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
     iXLen %4)
@@ -2490,11 +2490,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i16(
   <vscale x 1 x half>,
-  <vscale x 1 x half>*,
+  ptr,
   <vscale x 1 x i16>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -2503,7 +2503,7 @@ define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0,
 entry:
   call void @llvm.riscv.vsoxei.nxv1f16.nxv1i16(
     <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
+    ptr %1,
     <vscale x 1 x i16> %2,
     iXLen %3)
 
@@ -2512,12 +2512,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i16(
   <vscale x 1 x half>,
-  <vscale x 1 x half>*,
+  ptr,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -2526,7 +2526,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i16(
     <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
+    ptr %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -2536,11 +2536,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i16(
   <vscale x 2 x half>,
-  <vscale x 2 x half>*,
+  ptr,
   <vscale x 2 x i16>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -2549,7 +2549,7 @@ define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0,
 entry:
   call void @llvm.riscv.vsoxei.nxv2f16.nxv2i16(
     <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
+    ptr %1,
     <vscale x 2 x i16> %2,
     iXLen %3)
 
@@ -2558,12 +2558,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i16(
   <vscale x 2 x half>,
-  <vscale x 2 x half>*,
+  ptr,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -2572,7 +2572,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i16(
     <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
+    ptr %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -2582,11 +2582,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i16(
   <vscale x 4 x half>,
-  <vscale x 4 x half>*,
+  ptr,
   <vscale x 4 x i16>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -2595,7 +2595,7 @@ define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0,
 entry:
   call void @llvm.riscv.vsoxei.nxv4f16.nxv4i16(
     <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
+    ptr %1,
     <vscale x 4 x i16> %2,
     iXLen %3)
 
@@ -2604,12 +2604,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i16(
   <vscale x 4 x half>,
-  <vscale x 4 x half>*,
+  ptr,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -2618,7 +2618,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i16(
     <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
+    ptr %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -2628,11 +2628,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i16(
   <vscale x 8 x half>,
-  <vscale x 8 x half>*,
+  ptr,
   <vscale x 8 x i16>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -2641,7 +2641,7 @@ define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0,
 entry:
   call void @llvm.riscv.vsoxei.nxv8f16.nxv8i16(
     <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
+    ptr %1,
     <vscale x 8 x i16> %2,
     iXLen %3)
 
@@ -2650,12 +2650,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i16(
   <vscale x 8 x half>,
-  <vscale x 8 x half>*,
+  ptr,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -2664,7 +2664,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i16(
     <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
+    ptr %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
     iXLen %4)
@@ -2674,11 +2674,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv16f16.nxv16i16(
   <vscale x 16 x half>,
-  <vscale x 16 x half>*,
+  ptr,
   <vscale x 16 x i16>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -2687,7 +2687,7 @@ define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half>
 entry:
   call void @llvm.riscv.vsoxei.nxv16f16.nxv16i16(
     <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
+    ptr %1,
     <vscale x 16 x i16> %2,
     iXLen %3)
 
@@ -2696,12 +2696,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i16(
   <vscale x 16 x half>,
-  <vscale x 16 x half>*,
+  ptr,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -2710,7 +2710,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x h
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i16(
     <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
+    ptr %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
     iXLen %4)
@@ -2720,11 +2720,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv32f16.nxv32i16(
   <vscale x 32 x half>,
-  <vscale x 32 x half>*,
+  ptr,
   <vscale x 32 x i16>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -2733,7 +2733,7 @@ define void @intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half>
 entry:
   call void @llvm.riscv.vsoxei.nxv32f16.nxv32i16(
     <vscale x 32 x half> %0,
-    <vscale x 32 x half>* %1,
+    ptr %1,
     <vscale x 32 x i16> %2,
     iXLen %3)
 
@@ -2742,12 +2742,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i16(
   <vscale x 32 x half>,
-  <vscale x 32 x half>*,
+  ptr,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -2756,7 +2756,7 @@ define void @intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x h
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i16(
     <vscale x 32 x half> %0,
-    <vscale x 32 x half>* %1,
+    ptr %1,
     <vscale x 32 x i16> %2,
     <vscale x 32 x i1> %3,
     iXLen %4)
@@ -2766,11 +2766,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i16(
   <vscale x 1 x float>,
-  <vscale x 1 x float>*,
+  ptr,
   <vscale x 1 x i16>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -2779,7 +2779,7 @@ define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0,
 entry:
   call void @llvm.riscv.vsoxei.nxv1f32.nxv1i16(
     <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
+    ptr %1,
     <vscale x 1 x i16> %2,
     iXLen %3)
 
@@ -2788,12 +2788,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i16(
   <vscale x 1 x float>,
-  <vscale x 1 x float>*,
+  ptr,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -2802,7 +2802,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i16(
     <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
+    ptr %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -2812,11 +2812,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i16(
   <vscale x 2 x float>,
-  <vscale x 2 x float>*,
+  ptr,
   <vscale x 2 x i16>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -2825,7 +2825,7 @@ define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0,
 entry:
   call void @llvm.riscv.vsoxei.nxv2f32.nxv2i16(
     <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
+    ptr %1,
     <vscale x 2 x i16> %2,
     iXLen %3)
 
@@ -2834,12 +2834,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i16(
   <vscale x 2 x float>,
-  <vscale x 2 x float>*,
+  ptr,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -2848,7 +2848,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i16(
     <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
+    ptr %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -2858,11 +2858,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i16(
   <vscale x 4 x float>,
-  <vscale x 4 x float>*,
+  ptr,
   <vscale x 4 x i16>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -2871,7 +2871,7 @@ define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0,
 entry:
   call void @llvm.riscv.vsoxei.nxv4f32.nxv4i16(
     <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
+    ptr %1,
     <vscale x 4 x i16> %2,
     iXLen %3)
 
@@ -2880,12 +2880,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i16(
   <vscale x 4 x float>,
-  <vscale x 4 x float>*,
+  ptr,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -2894,7 +2894,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i16(
     <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
+    ptr %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -2904,11 +2904,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i16(
   <vscale x 8 x float>,
-  <vscale x 8 x float>*,
+  ptr,
   <vscale x 8 x i16>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -2917,7 +2917,7 @@ define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0,
 entry:
   call void @llvm.riscv.vsoxei.nxv8f32.nxv8i16(
     <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
+    ptr %1,
     <vscale x 8 x i16> %2,
     iXLen %3)
 
@@ -2926,12 +2926,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i16(
   <vscale x 8 x float>,
-  <vscale x 8 x float>*,
+  ptr,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -2940,7 +2940,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i16(
     <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
+    ptr %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
     iXLen %4)
@@ -2950,11 +2950,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv16f32.nxv16i16(
   <vscale x 16 x float>,
-  <vscale x 16 x float>*,
+  ptr,
   <vscale x 16 x i16>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -2963,7 +2963,7 @@ define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float>
 entry:
   call void @llvm.riscv.vsoxei.nxv16f32.nxv16i16(
     <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
+    ptr %1,
     <vscale x 16 x i16> %2,
     iXLen %3)
 
@@ -2972,12 +2972,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i16(
   <vscale x 16 x float>,
-  <vscale x 16 x float>*,
+  ptr,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -2986,7 +2986,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x f
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i16(
     <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
+    ptr %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
     iXLen %4)
@@ -2996,11 +2996,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i16(
   <vscale x 1 x double>,
-  <vscale x 1 x double>*,
+  ptr,
   <vscale x 1 x i16>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -3009,7 +3009,7 @@ define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0
 entry:
   call void @llvm.riscv.vsoxei.nxv1f64.nxv1i16(
     <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
+    ptr %1,
     <vscale x 1 x i16> %2,
     iXLen %3)
 
@@ -3018,12 +3018,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i16(
   <vscale x 1 x double>,
-  <vscale x 1 x double>*,
+  ptr,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -3032,7 +3032,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x doubl
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i16(
     <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
+    ptr %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -3042,11 +3042,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i16(
   <vscale x 2 x double>,
-  <vscale x 2 x double>*,
+  ptr,
   <vscale x 2 x i16>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -3055,7 +3055,7 @@ define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0
 entry:
   call void @llvm.riscv.vsoxei.nxv2f64.nxv2i16(
     <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
+    ptr %1,
     <vscale x 2 x i16> %2,
     iXLen %3)
 
@@ -3064,12 +3064,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i16(
   <vscale x 2 x double>,
-  <vscale x 2 x double>*,
+  ptr,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -3078,7 +3078,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x doubl
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i16(
     <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
+    ptr %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -3088,11 +3088,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i16(
   <vscale x 4 x double>,
-  <vscale x 4 x double>*,
+  ptr,
   <vscale x 4 x i16>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -3101,7 +3101,7 @@ define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0
 entry:
   call void @llvm.riscv.vsoxei.nxv4f64.nxv4i16(
     <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
+    ptr %1,
     <vscale x 4 x i16> %2,
     iXLen %3)
 
@@ -3110,12 +3110,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i16(
   <vscale x 4 x double>,
-  <vscale x 4 x double>*,
+  ptr,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -3124,7 +3124,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x doubl
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i16(
     <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
+    ptr %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -3134,11 +3134,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i16(
   <vscale x 8 x double>,
-  <vscale x 8 x double>*,
+  ptr,
   <vscale x 8 x i16>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -3147,7 +3147,7 @@ define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0
 entry:
   call void @llvm.riscv.vsoxei.nxv8f64.nxv8i16(
     <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
+    ptr %1,
     <vscale x 8 x i16> %2,
     iXLen %3)
 
@@ -3156,12 +3156,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i16(
   <vscale x 8 x double>,
-  <vscale x 8 x double>*,
+  ptr,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -3170,7 +3170,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x doubl
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i16(
     <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
+    ptr %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
     iXLen %4)
@@ -3180,11 +3180,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   <vscale x 1 x i8>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -3193,7 +3193,7 @@ define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vsca
 entry:
   call void @llvm.riscv.vsoxei.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
+    ptr %1,
     <vscale x 1 x i8> %2,
     iXLen %3)
 
@@ -3202,12 +3202,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -3216,7 +3216,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0,
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
+    ptr %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -3226,11 +3226,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
+  ptr,
   <vscale x 2 x i8>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -3239,7 +3239,7 @@ define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vsca
 entry:
   call void @llvm.riscv.vsoxei.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
+    ptr %1,
     <vscale x 2 x i8> %2,
     iXLen %3)
 
@@ -3248,12 +3248,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
+  ptr,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -3262,7 +3262,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0,
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
+    ptr %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -3272,11 +3272,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
+  ptr,
   <vscale x 4 x i8>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -3285,7 +3285,7 @@ define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vsca
 entry:
   call void @llvm.riscv.vsoxei.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
+    ptr %1,
     <vscale x 4 x i8> %2,
     iXLen %3)
 
@@ -3294,12 +3294,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
+  ptr,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -3308,7 +3308,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0,
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
+    ptr %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -3318,11 +3318,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
+  ptr,
   <vscale x 8 x i8>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -3331,7 +3331,7 @@ define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vsca
 entry:
   call void @llvm.riscv.vsoxei.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
+    ptr %1,
     <vscale x 8 x i8> %2,
     iXLen %3)
 
@@ -3340,12 +3340,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
+  ptr,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -3354,7 +3354,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0,
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
+    ptr %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
     iXLen %4)
@@ -3364,11 +3364,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
+  ptr,
   <vscale x 16 x i8>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -3377,7 +3377,7 @@ define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <
 entry:
   call void @llvm.riscv.vsoxei.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
+    ptr %1,
     <vscale x 16 x i8> %2,
     iXLen %3)
 
@@ -3386,12 +3386,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
+  ptr,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -3400,7 +3400,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
+    ptr %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
     iXLen %4)
@@ -3410,11 +3410,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
+  ptr,
   <vscale x 32 x i8>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -3423,7 +3423,7 @@ define void @intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <
 entry:
   call void @llvm.riscv.vsoxei.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
-    <vscale x 32 x i8>* %1,
+    ptr %1,
     <vscale x 32 x i8> %2,
     iXLen %3)
 
@@ -3432,12 +3432,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
+  ptr,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -3446,7 +3446,7 @@ define void @intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
-    <vscale x 32 x i8>* %1,
+    ptr %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
     iXLen %4)
@@ -3456,11 +3456,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
-  <vscale x 64 x i8>*,
+  ptr,
   <vscale x 64 x i8>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, <vscale x 64 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
@@ -3469,7 +3469,7 @@ define void @intrinsic_vsoxei_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <
 entry:
   call void @llvm.riscv.vsoxei.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
-    <vscale x 64 x i8>* %1,
+    ptr %1,
     <vscale x 64 x i8> %2,
     iXLen %3)
 
@@ -3478,12 +3478,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
-  <vscale x 64 x i8>*,
+  ptr,
   <vscale x 64 x i8>,
   <vscale x 64 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
@@ -3492,7 +3492,7 @@ define void @intrinsic_vsoxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
-    <vscale x 64 x i8>* %1,
+    ptr %1,
     <vscale x 64 x i8> %2,
     <vscale x 64 x i1> %3,
     iXLen %4)
@@ -3502,11 +3502,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
+  ptr,
   <vscale x 1 x i8>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -3515,7 +3515,7 @@ define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <v
 entry:
   call void @llvm.riscv.vsoxei.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
+    ptr %1,
     <vscale x 1 x i8> %2,
     iXLen %3)
 
@@ -3524,12 +3524,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
+  ptr,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -3538,7 +3538,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
+    ptr %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -3548,11 +3548,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i8(
   <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
+  ptr,
   <vscale x 2 x i8>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -3561,7 +3561,7 @@ define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <v
 entry:
   call void @llvm.riscv.vsoxei.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
+    ptr %1,
     <vscale x 2 x i8> %2,
     iXLen %3)
 
@@ -3570,12 +3570,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i8(
   <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
+  ptr,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -3584,7 +3584,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
+    ptr %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -3594,11 +3594,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i8(
   <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
+  ptr,
   <vscale x 4 x i8>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -3607,7 +3607,7 @@ define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <v
 entry:
   call void @llvm.riscv.vsoxei.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
+    ptr %1,
     <vscale x 4 x i8> %2,
     iXLen %3)
 
@@ -3616,12 +3616,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i8(
   <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
+  ptr,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -3630,7 +3630,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
+    ptr %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -3640,11 +3640,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i8(
   <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
+  ptr,
   <vscale x 8 x i8>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -3653,7 +3653,7 @@ define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <v
 entry:
   call void @llvm.riscv.vsoxei.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
+    ptr %1,
     <vscale x 8 x i8> %2,
     iXLen %3)
 
@@ -3662,12 +3662,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i8(
   <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
+  ptr,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -3676,7 +3676,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
+    ptr %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
     iXLen %4)
@@ -3686,11 +3686,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv16i16.nxv16i8(
   <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
+  ptr,
   <vscale x 16 x i8>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -3699,7 +3699,7 @@ define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0
 entry:
   call void @llvm.riscv.vsoxei.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
+    ptr %1,
     <vscale x 16 x i8> %2,
     iXLen %3)
 
@@ -3708,12 +3708,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i8(
   <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
+  ptr,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -3722,7 +3722,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i1
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
+    ptr %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
     iXLen %4)
@@ -3732,11 +3732,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv32i16.nxv32i8(
   <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
+  ptr,
   <vscale x 32 x i8>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -3745,7 +3745,7 @@ define void @intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0
 entry:
   call void @llvm.riscv.vsoxei.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
-    <vscale x 32 x i16>* %1,
+    ptr %1,
     <vscale x 32 x i8> %2,
     iXLen %3)
 
@@ -3754,12 +3754,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i8(
   <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
+  ptr,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -3768,7 +3768,7 @@ define void @intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i1
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
-    <vscale x 32 x i16>* %1,
+    ptr %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
     iXLen %4)
@@ -3778,11 +3778,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i8(
   <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
+  ptr,
   <vscale x 1 x i8>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -3791,7 +3791,7 @@ define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <v
 entry:
   call void @llvm.riscv.vsoxei.nxv1i32.nxv1i8(
     <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
+    ptr %1,
     <vscale x 1 x i8> %2,
     iXLen %3)
 
@@ -3800,12 +3800,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i8(
   <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
+  ptr,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -3814,7 +3814,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
+    ptr %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -3824,11 +3824,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i8(
   <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
+  ptr,
   <vscale x 2 x i8>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -3837,7 +3837,7 @@ define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <v
 entry:
   call void @llvm.riscv.vsoxei.nxv2i32.nxv2i8(
     <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
+    ptr %1,
     <vscale x 2 x i8> %2,
     iXLen %3)
 
@@ -3846,12 +3846,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i8(
   <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
+  ptr,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -3860,7 +3860,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
+    ptr %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -3870,11 +3870,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i8(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   <vscale x 4 x i8>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -3883,7 +3883,7 @@ define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <v
 entry:
   call void @llvm.riscv.vsoxei.nxv4i32.nxv4i8(
     <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
+    ptr %1,
     <vscale x 4 x i8> %2,
     iXLen %3)
 
@@ -3892,12 +3892,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i8(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -3906,7 +3906,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
+    ptr %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -3916,11 +3916,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i8(
   <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
+  ptr,
   <vscale x 8 x i8>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -3929,7 +3929,7 @@ define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <v
 entry:
   call void @llvm.riscv.vsoxei.nxv8i32.nxv8i8(
     <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
+    ptr %1,
     <vscale x 8 x i8> %2,
     iXLen %3)
 
@@ -3938,12 +3938,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i8(
   <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
+  ptr,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -3952,7 +3952,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
+    ptr %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
     iXLen %4)
@@ -3962,11 +3962,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv16i32.nxv16i8(
   <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
+  ptr,
   <vscale x 16 x i8>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -3975,7 +3975,7 @@ define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0
 entry:
   call void @llvm.riscv.vsoxei.nxv16i32.nxv16i8(
     <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
+    ptr %1,
     <vscale x 16 x i8> %2,
     iXLen %3)
 
@@ -3984,12 +3984,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i8(
   <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
+  ptr,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -3998,7 +3998,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i3
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
+    ptr %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
     iXLen %4)
@@ -4008,11 +4008,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i8(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   <vscale x 1 x i8>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -4021,7 +4021,7 @@ define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <v
 entry:
   call void @llvm.riscv.vsoxei.nxv1i64.nxv1i8(
     <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
+    ptr %1,
     <vscale x 1 x i8> %2,
     iXLen %3)
 
@@ -4030,12 +4030,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i8(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -4044,7 +4044,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
+    ptr %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -4054,11 +4054,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i8(
   <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
+  ptr,
   <vscale x 2 x i8>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -4067,7 +4067,7 @@ define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <v
 entry:
   call void @llvm.riscv.vsoxei.nxv2i64.nxv2i8(
     <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
+    ptr %1,
     <vscale x 2 x i8> %2,
     iXLen %3)
 
@@ -4076,12 +4076,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i8(
   <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
+  ptr,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -4090,7 +4090,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
+    ptr %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -4100,11 +4100,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i8(
   <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
+  ptr,
   <vscale x 4 x i8>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -4113,7 +4113,7 @@ define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <v
 entry:
   call void @llvm.riscv.vsoxei.nxv4i64.nxv4i8(
     <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
+    ptr %1,
     <vscale x 4 x i8> %2,
     iXLen %3)
 
@@ -4122,12 +4122,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i8(
   <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
+  ptr,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -4136,7 +4136,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
+    ptr %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -4146,11 +4146,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i8(
   <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
+  ptr,
   <vscale x 8 x i8>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -4159,7 +4159,7 @@ define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <v
 entry:
   call void @llvm.riscv.vsoxei.nxv8i64.nxv8i8(
     <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
+    ptr %1,
     <vscale x 8 x i8> %2,
     iXLen %3)
 
@@ -4168,12 +4168,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i8(
   <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
+  ptr,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -4182,7 +4182,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
+    ptr %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
     iXLen %4)
@@ -4192,11 +4192,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i8(
   <vscale x 1 x half>,
-  <vscale x 1 x half>*,
+  ptr,
   <vscale x 1 x i8>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -4205,7 +4205,7 @@ define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <
 entry:
   call void @llvm.riscv.vsoxei.nxv1f16.nxv1i8(
     <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
+    ptr %1,
     <vscale x 1 x i8> %2,
     iXLen %3)
 
@@ -4214,12 +4214,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i8(
   <vscale x 1 x half>,
-  <vscale x 1 x half>*,
+  ptr,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -4228,7 +4228,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i8(
     <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
+    ptr %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -4238,11 +4238,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i8(
   <vscale x 2 x half>,
-  <vscale x 2 x half>*,
+  ptr,
   <vscale x 2 x i8>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -4251,7 +4251,7 @@ define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <
 entry:
   call void @llvm.riscv.vsoxei.nxv2f16.nxv2i8(
     <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
+    ptr %1,
     <vscale x 2 x i8> %2,
     iXLen %3)
 
@@ -4260,12 +4260,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i8(
   <vscale x 2 x half>,
-  <vscale x 2 x half>*,
+  ptr,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -4274,7 +4274,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i8(
     <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
+    ptr %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -4284,11 +4284,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i8(
   <vscale x 4 x half>,
-  <vscale x 4 x half>*,
+  ptr,
   <vscale x 4 x i8>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -4297,7 +4297,7 @@ define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <
 entry:
   call void @llvm.riscv.vsoxei.nxv4f16.nxv4i8(
     <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
+    ptr %1,
     <vscale x 4 x i8> %2,
     iXLen %3)
 
@@ -4306,12 +4306,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i8(
   <vscale x 4 x half>,
-  <vscale x 4 x half>*,
+  ptr,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -4320,7 +4320,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i8(
     <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
+    ptr %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -4330,11 +4330,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i8(
   <vscale x 8 x half>,
-  <vscale x 8 x half>*,
+  ptr,
   <vscale x 8 x i8>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -4343,7 +4343,7 @@ define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <
 entry:
   call void @llvm.riscv.vsoxei.nxv8f16.nxv8i8(
     <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
+    ptr %1,
     <vscale x 8 x i8> %2,
     iXLen %3)
 
@@ -4352,12 +4352,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i8(
   <vscale x 8 x half>,
-  <vscale x 8 x half>*,
+  ptr,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -4366,7 +4366,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i8(
     <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
+    ptr %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
     iXLen %4)
@@ -4376,11 +4376,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv16f16.nxv16i8(
   <vscale x 16 x half>,
-  <vscale x 16 x half>*,
+  ptr,
   <vscale x 16 x i8>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -4389,7 +4389,7 @@ define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %
 entry:
   call void @llvm.riscv.vsoxei.nxv16f16.nxv16i8(
     <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
+    ptr %1,
     <vscale x 16 x i8> %2,
     iXLen %3)
 
@@ -4398,12 +4398,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i8(
   <vscale x 16 x half>,
-  <vscale x 16 x half>*,
+  ptr,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -4412,7 +4412,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x ha
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i8(
     <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
+    ptr %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
     iXLen %4)
@@ -4422,11 +4422,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv32f16.nxv32i8(
   <vscale x 32 x half>,
-  <vscale x 32 x half>*,
+  ptr,
   <vscale x 32 x i8>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -4435,7 +4435,7 @@ define void @intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %
 entry:
   call void @llvm.riscv.vsoxei.nxv32f16.nxv32i8(
     <vscale x 32 x half> %0,
-    <vscale x 32 x half>* %1,
+    ptr %1,
     <vscale x 32 x i8> %2,
     iXLen %3)
 
@@ -4444,12 +4444,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i8(
   <vscale x 32 x half>,
-  <vscale x 32 x half>*,
+  ptr,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -4458,7 +4458,7 @@ define void @intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x ha
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i8(
     <vscale x 32 x half> %0,
-    <vscale x 32 x half>* %1,
+    ptr %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
     iXLen %4)
@@ -4468,11 +4468,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i8(
   <vscale x 1 x float>,
-  <vscale x 1 x float>*,
+  ptr,
   <vscale x 1 x i8>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -4481,7 +4481,7 @@ define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0,
 entry:
   call void @llvm.riscv.vsoxei.nxv1f32.nxv1i8(
     <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
+    ptr %1,
     <vscale x 1 x i8> %2,
     iXLen %3)
 
@@ -4490,12 +4490,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i8(
   <vscale x 1 x float>,
-  <vscale x 1 x float>*,
+  ptr,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -4504,7 +4504,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i8(
     <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
+    ptr %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -4514,11 +4514,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i8(
   <vscale x 2 x float>,
-  <vscale x 2 x float>*,
+  ptr,
   <vscale x 2 x i8>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -4527,7 +4527,7 @@ define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0,
 entry:
   call void @llvm.riscv.vsoxei.nxv2f32.nxv2i8(
     <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
+    ptr %1,
     <vscale x 2 x i8> %2,
     iXLen %3)
 
@@ -4536,12 +4536,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i8(
   <vscale x 2 x float>,
-  <vscale x 2 x float>*,
+  ptr,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -4550,7 +4550,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i8(
     <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
+    ptr %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -4560,11 +4560,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i8(
   <vscale x 4 x float>,
-  <vscale x 4 x float>*,
+  ptr,
   <vscale x 4 x i8>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -4573,7 +4573,7 @@ define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0,
 entry:
   call void @llvm.riscv.vsoxei.nxv4f32.nxv4i8(
     <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
+    ptr %1,
     <vscale x 4 x i8> %2,
     iXLen %3)
 
@@ -4582,12 +4582,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i8(
   <vscale x 4 x float>,
-  <vscale x 4 x float>*,
+  ptr,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -4596,7 +4596,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i8(
     <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
+    ptr %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -4606,11 +4606,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i8(
   <vscale x 8 x float>,
-  <vscale x 8 x float>*,
+  ptr,
   <vscale x 8 x i8>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -4619,7 +4619,7 @@ define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0,
 entry:
   call void @llvm.riscv.vsoxei.nxv8f32.nxv8i8(
     <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
+    ptr %1,
     <vscale x 8 x i8> %2,
     iXLen %3)
 
@@ -4628,12 +4628,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i8(
   <vscale x 8 x float>,
-  <vscale x 8 x float>*,
+  ptr,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -4642,7 +4642,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float>
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i8(
     <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
+    ptr %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
     iXLen %4)
@@ -4652,11 +4652,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv16f32.nxv16i8(
   <vscale x 16 x float>,
-  <vscale x 16 x float>*,
+  ptr,
   <vscale x 16 x i8>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -4665,7 +4665,7 @@ define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float>
 entry:
   call void @llvm.riscv.vsoxei.nxv16f32.nxv16i8(
     <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
+    ptr %1,
     <vscale x 16 x i8> %2,
     iXLen %3)
 
@@ -4674,12 +4674,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i8(
   <vscale x 16 x float>,
-  <vscale x 16 x float>*,
+  ptr,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -4688,7 +4688,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x fl
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i8(
     <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
+    ptr %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
     iXLen %4)
@@ -4698,11 +4698,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i8(
   <vscale x 1 x double>,
-  <vscale x 1 x double>*,
+  ptr,
   <vscale x 1 x i8>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -4711,7 +4711,7 @@ define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0,
 entry:
   call void @llvm.riscv.vsoxei.nxv1f64.nxv1i8(
     <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
+    ptr %1,
     <vscale x 1 x i8> %2,
     iXLen %3)
 
@@ -4720,12 +4720,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i8(
   <vscale x 1 x double>,
-  <vscale x 1 x double>*,
+  ptr,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -4734,7 +4734,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i8(
     <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
+    ptr %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -4744,11 +4744,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i8(
   <vscale x 2 x double>,
-  <vscale x 2 x double>*,
+  ptr,
   <vscale x 2 x i8>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -4757,7 +4757,7 @@ define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0,
 entry:
   call void @llvm.riscv.vsoxei.nxv2f64.nxv2i8(
     <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
+    ptr %1,
     <vscale x 2 x i8> %2,
     iXLen %3)
 
@@ -4766,12 +4766,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i8(
   <vscale x 2 x double>,
-  <vscale x 2 x double>*,
+  ptr,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -4780,7 +4780,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i8(
     <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
+    ptr %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -4790,11 +4790,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i8(
   <vscale x 4 x double>,
-  <vscale x 4 x double>*,
+  ptr,
   <vscale x 4 x i8>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -4803,7 +4803,7 @@ define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0,
 entry:
   call void @llvm.riscv.vsoxei.nxv4f64.nxv4i8(
     <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
+    ptr %1,
     <vscale x 4 x i8> %2,
     iXLen %3)
 
@@ -4812,12 +4812,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i8(
   <vscale x 4 x double>,
-  <vscale x 4 x double>*,
+  ptr,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -4826,7 +4826,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i8(
     <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
+    ptr %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -4836,11 +4836,11 @@ entry:
 
 declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i8(
   <vscale x 8 x double>,
-  <vscale x 8 x double>*,
+  ptr,
   <vscale x 8 x i8>,
   iXLen);
 
-define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -4849,7 +4849,7 @@ define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0,
 entry:
   call void @llvm.riscv.vsoxei.nxv8f64.nxv8i8(
     <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
+    ptr %1,
     <vscale x 8 x i8> %2,
     iXLen %3)
 
@@ -4858,12 +4858,12 @@ entry:
 
 declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i8(
   <vscale x 8 x double>,
-  <vscale x 8 x double>*,
+  ptr,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -4872,7 +4872,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i8(
     <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
+    ptr %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
     iXLen %4)

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsse.ll b/llvm/test/CodeGen/RISCV/rvv/vsse.ll
index 418325033648f..9b627bcd66467 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsse.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsse.ll
@@ -6,11 +6,11 @@
 
 declare void @llvm.riscv.vsse.nxv1i64(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   iXLen,
   iXLen);
 
-define void @intrinsic_vsse_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, iXLen %2, iXLen %3) nounwind {
+define void @intrinsic_vsse_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
@@ -19,7 +19,7 @@ define void @intrinsic_vsse_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1
 entry:
   call void @llvm.riscv.vsse.nxv1i64(
     <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
+    ptr %1,
     iXLen %2,
     iXLen %3)
 
@@ -28,12 +28,12 @@ entry:
 
 declare void @llvm.riscv.vsse.mask.nxv1i64(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   iXLen,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
@@ -42,7 +42,7 @@ define void @intrinsic_vsse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscal
 entry:
   call void @llvm.riscv.vsse.mask.nxv1i64(
     <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -52,11 +52,11 @@ entry:
 
 declare void @llvm.riscv.vsse.nxv2i64(
   <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
+  ptr,
   iXLen,
   iXLen);
 
-define void @intrinsic_vsse_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, iXLen %2, iXLen %3) nounwind {
+define void @intrinsic_vsse_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
@@ -65,7 +65,7 @@ define void @intrinsic_vsse_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2
 entry:
   call void @llvm.riscv.vsse.nxv2i64(
     <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
+    ptr %1,
     iXLen %2,
     iXLen %3)
 
@@ -74,12 +74,12 @@ entry:
 
 declare void @llvm.riscv.vsse.mask.nxv2i64(
   <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
+  ptr,
   iXLen,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
@@ -88,7 +88,7 @@ define void @intrinsic_vsse_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscal
 entry:
   call void @llvm.riscv.vsse.mask.nxv2i64(
     <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -98,11 +98,11 @@ entry:
 
 declare void @llvm.riscv.vsse.nxv4i64(
   <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
+  ptr,
   iXLen,
   iXLen);
 
-define void @intrinsic_vsse_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, iXLen %2, iXLen %3) nounwind {
+define void @intrinsic_vsse_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
@@ -111,7 +111,7 @@ define void @intrinsic_vsse_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4
 entry:
   call void @llvm.riscv.vsse.nxv4i64(
     <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
+    ptr %1,
     iXLen %2,
     iXLen %3)
 
@@ -120,12 +120,12 @@ entry:
 
 declare void @llvm.riscv.vsse.mask.nxv4i64(
   <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
+  ptr,
   iXLen,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
@@ -134,7 +134,7 @@ define void @intrinsic_vsse_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscal
 entry:
   call void @llvm.riscv.vsse.mask.nxv4i64(
     <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -144,11 +144,11 @@ entry:
 
 declare void @llvm.riscv.vsse.nxv8i64(
   <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
+  ptr,
   iXLen,
   iXLen);
 
-define void @intrinsic_vsse_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, iXLen %2, iXLen %3) nounwind {
+define void @intrinsic_vsse_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
@@ -157,7 +157,7 @@ define void @intrinsic_vsse_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8
 entry:
   call void @llvm.riscv.vsse.nxv8i64(
     <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
+    ptr %1,
     iXLen %2,
     iXLen %3)
 
@@ -166,12 +166,12 @@ entry:
 
 declare void @llvm.riscv.vsse.mask.nxv8i64(
   <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
+  ptr,
   iXLen,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
@@ -180,7 +180,7 @@ define void @intrinsic_vsse_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscal
 entry:
   call void @llvm.riscv.vsse.mask.nxv8i64(
     <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 8 x i1> %3,
     iXLen %4)
@@ -190,11 +190,11 @@ entry:
 
 declare void @llvm.riscv.vsse.nxv1f64(
   <vscale x 1 x double>,
-  <vscale x 1 x double>*,
+  ptr,
   iXLen,
   iXLen);
 
-define void @intrinsic_vsse_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, iXLen %2, iXLen %3) nounwind {
+define void @intrinsic_vsse_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
@@ -203,7 +203,7 @@ define void @intrinsic_vsse_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale
 entry:
   call void @llvm.riscv.vsse.nxv1f64(
     <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
+    ptr %1,
     iXLen %2,
     iXLen %3)
 
@@ -212,12 +212,12 @@ entry:
 
 declare void @llvm.riscv.vsse.mask.nxv1f64(
   <vscale x 1 x double>,
-  <vscale x 1 x double>*,
+  ptr,
   iXLen,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
@@ -226,7 +226,7 @@ define void @intrinsic_vsse_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vs
 entry:
   call void @llvm.riscv.vsse.mask.nxv1f64(
     <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -236,11 +236,11 @@ entry:
 
 declare void @llvm.riscv.vsse.nxv2f64(
   <vscale x 2 x double>,
-  <vscale x 2 x double>*,
+  ptr,
   iXLen,
   iXLen);
 
-define void @intrinsic_vsse_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, iXLen %2, iXLen %3) nounwind {
+define void @intrinsic_vsse_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
@@ -249,7 +249,7 @@ define void @intrinsic_vsse_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale
 entry:
   call void @llvm.riscv.vsse.nxv2f64(
     <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
+    ptr %1,
     iXLen %2,
     iXLen %3)
 
@@ -258,12 +258,12 @@ entry:
 
 declare void @llvm.riscv.vsse.mask.nxv2f64(
   <vscale x 2 x double>,
-  <vscale x 2 x double>*,
+  ptr,
   iXLen,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
@@ -272,7 +272,7 @@ define void @intrinsic_vsse_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vs
 entry:
   call void @llvm.riscv.vsse.mask.nxv2f64(
     <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -282,11 +282,11 @@ entry:
 
 declare void @llvm.riscv.vsse.nxv4f64(
   <vscale x 4 x double>,
-  <vscale x 4 x double>*,
+  ptr,
   iXLen,
   iXLen);
 
-define void @intrinsic_vsse_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, iXLen %2, iXLen %3) nounwind {
+define void @intrinsic_vsse_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
@@ -295,7 +295,7 @@ define void @intrinsic_vsse_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale
 entry:
   call void @llvm.riscv.vsse.nxv4f64(
     <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
+    ptr %1,
     iXLen %2,
     iXLen %3)
 
@@ -304,12 +304,12 @@ entry:
 
 declare void @llvm.riscv.vsse.mask.nxv4f64(
   <vscale x 4 x double>,
-  <vscale x 4 x double>*,
+  ptr,
   iXLen,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
@@ -318,7 +318,7 @@ define void @intrinsic_vsse_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vs
 entry:
   call void @llvm.riscv.vsse.mask.nxv4f64(
     <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -328,11 +328,11 @@ entry:
 
 declare void @llvm.riscv.vsse.nxv8f64(
   <vscale x 8 x double>,
-  <vscale x 8 x double>*,
+  ptr,
   iXLen,
   iXLen);
 
-define void @intrinsic_vsse_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, iXLen %2, iXLen %3) nounwind {
+define void @intrinsic_vsse_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
@@ -341,7 +341,7 @@ define void @intrinsic_vsse_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale
 entry:
   call void @llvm.riscv.vsse.nxv8f64(
     <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
+    ptr %1,
     iXLen %2,
     iXLen %3)
 
@@ -350,12 +350,12 @@ entry:
 
 declare void @llvm.riscv.vsse.mask.nxv8f64(
   <vscale x 8 x double>,
-  <vscale x 8 x double>*,
+  ptr,
   iXLen,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
@@ -364,7 +364,7 @@ define void @intrinsic_vsse_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vs
 entry:
   call void @llvm.riscv.vsse.mask.nxv8f64(
     <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 8 x i1> %3,
     iXLen %4)
@@ -374,11 +374,11 @@ entry:
 
 declare void @llvm.riscv.vsse.nxv1i32(
   <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
+  ptr,
   iXLen,
   iXLen);
 
-define void @intrinsic_vsse_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, iXLen %2, iXLen %3) nounwind {
+define void @intrinsic_vsse_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
@@ -387,7 +387,7 @@ define void @intrinsic_vsse_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1
 entry:
   call void @llvm.riscv.vsse.nxv1i32(
     <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
+    ptr %1,
     iXLen %2,
     iXLen %3)
 
@@ -396,12 +396,12 @@ entry:
 
 declare void @llvm.riscv.vsse.mask.nxv1i32(
   <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
+  ptr,
   iXLen,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
@@ -410,7 +410,7 @@ define void @intrinsic_vsse_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscal
 entry:
   call void @llvm.riscv.vsse.mask.nxv1i32(
     <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -420,11 +420,11 @@ entry:
 
 declare void @llvm.riscv.vsse.nxv2i32(
   <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
+  ptr,
   iXLen,
   iXLen);
 
-define void @intrinsic_vsse_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, iXLen %2, iXLen %3) nounwind {
+define void @intrinsic_vsse_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
@@ -433,7 +433,7 @@ define void @intrinsic_vsse_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2
 entry:
   call void @llvm.riscv.vsse.nxv2i32(
     <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
+    ptr %1,
     iXLen %2,
     iXLen %3)
 
@@ -442,12 +442,12 @@ entry:
 
 declare void @llvm.riscv.vsse.mask.nxv2i32(
   <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
+  ptr,
   iXLen,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
@@ -456,7 +456,7 @@ define void @intrinsic_vsse_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscal
 entry:
   call void @llvm.riscv.vsse.mask.nxv2i32(
     <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -466,11 +466,11 @@ entry:
 
 declare void @llvm.riscv.vsse.nxv4i32(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   iXLen,
   iXLen);
 
-define void @intrinsic_vsse_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, iXLen %2, iXLen %3) nounwind {
+define void @intrinsic_vsse_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
@@ -479,7 +479,7 @@ define void @intrinsic_vsse_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4
 entry:
   call void @llvm.riscv.vsse.nxv4i32(
     <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
+    ptr %1,
     iXLen %2,
     iXLen %3)
 
@@ -488,12 +488,12 @@ entry:
 
 declare void @llvm.riscv.vsse.mask.nxv4i32(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   iXLen,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
@@ -502,7 +502,7 @@ define void @intrinsic_vsse_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscal
 entry:
   call void @llvm.riscv.vsse.mask.nxv4i32(
     <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -512,11 +512,11 @@ entry:
 
 declare void @llvm.riscv.vsse.nxv8i32(
   <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
+  ptr,
   iXLen,
   iXLen);
 
-define void @intrinsic_vsse_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, iXLen %2, iXLen %3) nounwind {
+define void @intrinsic_vsse_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
@@ -525,7 +525,7 @@ define void @intrinsic_vsse_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8
 entry:
   call void @llvm.riscv.vsse.nxv8i32(
     <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
+    ptr %1,
     iXLen %2,
     iXLen %3)
 
@@ -534,12 +534,12 @@ entry:
 
 declare void @llvm.riscv.vsse.mask.nxv8i32(
   <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
+  ptr,
   iXLen,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
@@ -548,7 +548,7 @@ define void @intrinsic_vsse_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscal
 entry:
   call void @llvm.riscv.vsse.mask.nxv8i32(
     <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 8 x i1> %3,
     iXLen %4)
@@ -558,11 +558,11 @@ entry:
 
 declare void @llvm.riscv.vsse.nxv16i32(
   <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
+  ptr,
   iXLen,
   iXLen);
 
-define void @intrinsic_vsse_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, iXLen %2, iXLen %3) nounwind {
+define void @intrinsic_vsse_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
@@ -571,7 +571,7 @@ define void @intrinsic_vsse_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale
 entry:
   call void @llvm.riscv.vsse.nxv16i32(
     <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
+    ptr %1,
     iXLen %2,
     iXLen %3)
 
@@ -580,12 +580,12 @@ entry:
 
 declare void @llvm.riscv.vsse.mask.nxv16i32(
   <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
+  ptr,
   iXLen,
   <vscale x 16 x i1>,
   iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, ptr %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
@@ -594,7 +594,7 @@ define void @intrinsic_vsse_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vs
 entry:
   call void @llvm.riscv.vsse.mask.nxv16i32(
     <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 16 x i1> %3,
     iXLen %4)
@@ -604,11 +604,11 @@ entry:
 
 declare void @llvm.riscv.vsse.nxv1f32(
   <vscale x 1 x float>,
-  <vscale x 1 x float>*,
+  ptr,
   iXLen,
   iXLen);
 
-define void @intrinsic_vsse_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, iXLen %2, iXLen %3) nounwind {
+define void @intrinsic_vsse_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
@@ -617,7 +617,7 @@ define void @intrinsic_vsse_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x
 entry:
   call void @llvm.riscv.vsse.nxv1f32(
     <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
+    ptr %1,
     iXLen %2,
     iXLen %3)
 
@@ -626,12 +626,12 @@ entry:
 
 declare void @llvm.riscv.vsse.mask.nxv1f32(
   <vscale x 1 x float>,
-  <vscale x 1 x float>*,
+  ptr,
   iXLen,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
@@ -640,7 +640,7 @@ define void @intrinsic_vsse_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vsc
 entry:
   call void @llvm.riscv.vsse.mask.nxv1f32(
     <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -650,11 +650,11 @@ entry:
 
 declare void @llvm.riscv.vsse.nxv2f32(
   <vscale x 2 x float>,
-  <vscale x 2 x float>*,
+  ptr,
   iXLen,
   iXLen);
 
-define void @intrinsic_vsse_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, iXLen %2, iXLen %3) nounwind {
+define void @intrinsic_vsse_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
@@ -663,7 +663,7 @@ define void @intrinsic_vsse_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x
 entry:
   call void @llvm.riscv.vsse.nxv2f32(
     <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
+    ptr %1,
     iXLen %2,
     iXLen %3)
 
@@ -672,12 +672,12 @@ entry:
 
 declare void @llvm.riscv.vsse.mask.nxv2f32(
   <vscale x 2 x float>,
-  <vscale x 2 x float>*,
+  ptr,
   iXLen,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
@@ -686,7 +686,7 @@ define void @intrinsic_vsse_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vsc
 entry:
   call void @llvm.riscv.vsse.mask.nxv2f32(
     <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -696,11 +696,11 @@ entry:
 
 declare void @llvm.riscv.vsse.nxv4f32(
   <vscale x 4 x float>,
-  <vscale x 4 x float>*,
+  ptr,
   iXLen,
   iXLen);
 
-define void @intrinsic_vsse_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, iXLen %2, iXLen %3) nounwind {
+define void @intrinsic_vsse_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
@@ -709,7 +709,7 @@ define void @intrinsic_vsse_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x
 entry:
   call void @llvm.riscv.vsse.nxv4f32(
     <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
+    ptr %1,
     iXLen %2,
     iXLen %3)
 
@@ -718,12 +718,12 @@ entry:
 
 declare void @llvm.riscv.vsse.mask.nxv4f32(
   <vscale x 4 x float>,
-  <vscale x 4 x float>*,
+  ptr,
   iXLen,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
@@ -732,7 +732,7 @@ define void @intrinsic_vsse_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vsc
 entry:
   call void @llvm.riscv.vsse.mask.nxv4f32(
     <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -742,11 +742,11 @@ entry:
 
 declare void @llvm.riscv.vsse.nxv8f32(
   <vscale x 8 x float>,
-  <vscale x 8 x float>*,
+  ptr,
   iXLen,
   iXLen);
 
-define void @intrinsic_vsse_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, iXLen %2, iXLen %3) nounwind {
+define void @intrinsic_vsse_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
@@ -755,7 +755,7 @@ define void @intrinsic_vsse_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x
 entry:
   call void @llvm.riscv.vsse.nxv8f32(
     <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
+    ptr %1,
     iXLen %2,
     iXLen %3)
 
@@ -764,12 +764,12 @@ entry:
 
 declare void @llvm.riscv.vsse.mask.nxv8f32(
   <vscale x 8 x float>,
-  <vscale x 8 x float>*,
+  ptr,
   iXLen,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
@@ -778,7 +778,7 @@ define void @intrinsic_vsse_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vsc
 entry:
   call void @llvm.riscv.vsse.mask.nxv8f32(
     <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 8 x i1> %3,
     iXLen %4)
@@ -788,11 +788,11 @@ entry:
 
 declare void @llvm.riscv.vsse.nxv16f32(
   <vscale x 16 x float>,
-  <vscale x 16 x float>*,
+  ptr,
   iXLen,
   iXLen);
 
-define void @intrinsic_vsse_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, iXLen %2, iXLen %3) nounwind {
+define void @intrinsic_vsse_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
@@ -801,7 +801,7 @@ define void @intrinsic_vsse_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscal
 entry:
   call void @llvm.riscv.vsse.nxv16f32(
     <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
+    ptr %1,
     iXLen %2,
     iXLen %3)
 
@@ -810,12 +810,12 @@ entry:
 
 declare void @llvm.riscv.vsse.mask.nxv16f32(
   <vscale x 16 x float>,
-  <vscale x 16 x float>*,
+  ptr,
   iXLen,
   <vscale x 16 x i1>,
   iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, ptr %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
@@ -824,7 +824,7 @@ define void @intrinsic_vsse_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <
 entry:
   call void @llvm.riscv.vsse.mask.nxv16f32(
     <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 16 x i1> %3,
     iXLen %4)
@@ -834,11 +834,11 @@ entry:
 
 declare void @llvm.riscv.vsse.nxv1i16(
   <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
+  ptr,
   iXLen,
   iXLen);
 
-define void @intrinsic_vsse_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, iXLen %2, iXLen %3) nounwind {
+define void @intrinsic_vsse_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
@@ -847,7 +847,7 @@ define void @intrinsic_vsse_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1
 entry:
   call void @llvm.riscv.vsse.nxv1i16(
     <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
+    ptr %1,
     iXLen %2,
     iXLen %3)
 
@@ -856,12 +856,12 @@ entry:
 
 declare void @llvm.riscv.vsse.mask.nxv1i16(
   <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
+  ptr,
   iXLen,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
@@ -870,7 +870,7 @@ define void @intrinsic_vsse_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscal
 entry:
   call void @llvm.riscv.vsse.mask.nxv1i16(
     <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -880,11 +880,11 @@ entry:
 
 declare void @llvm.riscv.vsse.nxv2i16(
   <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
+  ptr,
   iXLen,
   iXLen);
 
-define void @intrinsic_vsse_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, iXLen %2, iXLen %3) nounwind {
+define void @intrinsic_vsse_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
@@ -893,7 +893,7 @@ define void @intrinsic_vsse_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2
 entry:
   call void @llvm.riscv.vsse.nxv2i16(
     <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
+    ptr %1,
     iXLen %2,
     iXLen %3)
 
@@ -902,12 +902,12 @@ entry:
 
 declare void @llvm.riscv.vsse.mask.nxv2i16(
   <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
+  ptr,
   iXLen,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
@@ -916,7 +916,7 @@ define void @intrinsic_vsse_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscal
 entry:
   call void @llvm.riscv.vsse.mask.nxv2i16(
     <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -926,11 +926,11 @@ entry:
 
 declare void @llvm.riscv.vsse.nxv4i16(
   <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
+  ptr,
   iXLen,
   iXLen);
 
-define void @intrinsic_vsse_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, iXLen %2, iXLen %3) nounwind {
+define void @intrinsic_vsse_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
@@ -939,7 +939,7 @@ define void @intrinsic_vsse_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4
 entry:
   call void @llvm.riscv.vsse.nxv4i16(
     <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
+    ptr %1,
     iXLen %2,
     iXLen %3)
 
@@ -948,12 +948,12 @@ entry:
 
 declare void @llvm.riscv.vsse.mask.nxv4i16(
   <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
+  ptr,
   iXLen,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
@@ -962,7 +962,7 @@ define void @intrinsic_vsse_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscal
 entry:
   call void @llvm.riscv.vsse.mask.nxv4i16(
     <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -972,11 +972,11 @@ entry:
 
 declare void @llvm.riscv.vsse.nxv8i16(
   <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
+  ptr,
   iXLen,
   iXLen);
 
-define void @intrinsic_vsse_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, iXLen %2, iXLen %3) nounwind {
+define void @intrinsic_vsse_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
@@ -985,7 +985,7 @@ define void @intrinsic_vsse_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8
 entry:
   call void @llvm.riscv.vsse.nxv8i16(
     <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
+    ptr %1,
     iXLen %2,
     iXLen %3)
 
@@ -994,12 +994,12 @@ entry:
 
 declare void @llvm.riscv.vsse.mask.nxv8i16(
   <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
+  ptr,
   iXLen,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
@@ -1008,7 +1008,7 @@ define void @intrinsic_vsse_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscal
 entry:
   call void @llvm.riscv.vsse.mask.nxv8i16(
     <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 8 x i1> %3,
     iXLen %4)
@@ -1018,11 +1018,11 @@ entry:
 
 declare void @llvm.riscv.vsse.nxv16i16(
   <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
+  ptr,
   iXLen,
   iXLen);
 
-define void @intrinsic_vsse_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, iXLen %2, iXLen %3) nounwind {
+define void @intrinsic_vsse_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
@@ -1031,7 +1031,7 @@ define void @intrinsic_vsse_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale
 entry:
   call void @llvm.riscv.vsse.nxv16i16(
     <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
+    ptr %1,
     iXLen %2,
     iXLen %3)
 
@@ -1040,12 +1040,12 @@ entry:
 
 declare void @llvm.riscv.vsse.mask.nxv16i16(
   <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
+  ptr,
   iXLen,
   <vscale x 16 x i1>,
   iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
@@ -1054,7 +1054,7 @@ define void @intrinsic_vsse_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vs
 entry:
   call void @llvm.riscv.vsse.mask.nxv16i16(
     <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 16 x i1> %3,
     iXLen %4)
@@ -1064,11 +1064,11 @@ entry:
 
 declare void @llvm.riscv.vsse.nxv32i16(
   <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
+  ptr,
   iXLen,
   iXLen);
 
-define void @intrinsic_vsse_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, iXLen %2, iXLen %3) nounwind {
+define void @intrinsic_vsse_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, ma
@@ -1077,7 +1077,7 @@ define void @intrinsic_vsse_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale
 entry:
   call void @llvm.riscv.vsse.nxv32i16(
     <vscale x 32 x i16> %0,
-    <vscale x 32 x i16>* %1,
+    ptr %1,
     iXLen %2,
     iXLen %3)
 
@@ -1086,12 +1086,12 @@ entry:
 
 declare void @llvm.riscv.vsse.mask.nxv32i16(
   <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
+  ptr,
   iXLen,
   <vscale x 32 x i1>,
   iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, ma
@@ -1100,7 +1100,7 @@ define void @intrinsic_vsse_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vs
 entry:
   call void @llvm.riscv.vsse.mask.nxv32i16(
     <vscale x 32 x i16> %0,
-    <vscale x 32 x i16>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 32 x i1> %3,
     iXLen %4)
@@ -1110,11 +1110,11 @@ entry:
 
 declare void @llvm.riscv.vsse.nxv1f16(
   <vscale x 1 x half>,
-  <vscale x 1 x half>*,
+  ptr,
   iXLen,
   iXLen);
 
-define void @intrinsic_vsse_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, iXLen %2, iXLen %3) nounwind {
+define void @intrinsic_vsse_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
@@ -1123,7 +1123,7 @@ define void @intrinsic_vsse_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x
 entry:
   call void @llvm.riscv.vsse.nxv1f16(
     <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
+    ptr %1,
     iXLen %2,
     iXLen %3)
 
@@ -1132,12 +1132,12 @@ entry:
 
 declare void @llvm.riscv.vsse.mask.nxv1f16(
   <vscale x 1 x half>,
-  <vscale x 1 x half>*,
+  ptr,
   iXLen,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
@@ -1146,7 +1146,7 @@ define void @intrinsic_vsse_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vsca
 entry:
   call void @llvm.riscv.vsse.mask.nxv1f16(
     <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -1156,11 +1156,11 @@ entry:
 
 declare void @llvm.riscv.vsse.nxv2f16(
   <vscale x 2 x half>,
-  <vscale x 2 x half>*,
+  ptr,
   iXLen,
   iXLen);
 
-define void @intrinsic_vsse_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, iXLen %2, iXLen %3) nounwind {
+define void @intrinsic_vsse_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
@@ -1169,7 +1169,7 @@ define void @intrinsic_vsse_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x
 entry:
   call void @llvm.riscv.vsse.nxv2f16(
     <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
+    ptr %1,
     iXLen %2,
     iXLen %3)
 
@@ -1178,12 +1178,12 @@ entry:
 
 declare void @llvm.riscv.vsse.mask.nxv2f16(
   <vscale x 2 x half>,
-  <vscale x 2 x half>*,
+  ptr,
   iXLen,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
@@ -1192,7 +1192,7 @@ define void @intrinsic_vsse_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vsca
 entry:
   call void @llvm.riscv.vsse.mask.nxv2f16(
     <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -1202,11 +1202,11 @@ entry:
 
 declare void @llvm.riscv.vsse.nxv4f16(
   <vscale x 4 x half>,
-  <vscale x 4 x half>*,
+  ptr,
   iXLen,
   iXLen);
 
-define void @intrinsic_vsse_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, iXLen %2, iXLen %3) nounwind {
+define void @intrinsic_vsse_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
@@ -1215,7 +1215,7 @@ define void @intrinsic_vsse_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x
 entry:
   call void @llvm.riscv.vsse.nxv4f16(
     <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
+    ptr %1,
     iXLen %2,
     iXLen %3)
 
@@ -1224,12 +1224,12 @@ entry:
 
 declare void @llvm.riscv.vsse.mask.nxv4f16(
   <vscale x 4 x half>,
-  <vscale x 4 x half>*,
+  ptr,
   iXLen,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
@@ -1238,7 +1238,7 @@ define void @intrinsic_vsse_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vsca
 entry:
   call void @llvm.riscv.vsse.mask.nxv4f16(
     <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -1248,11 +1248,11 @@ entry:
 
 declare void @llvm.riscv.vsse.nxv8f16(
   <vscale x 8 x half>,
-  <vscale x 8 x half>*,
+  ptr,
   iXLen,
   iXLen);
 
-define void @intrinsic_vsse_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, iXLen %2, iXLen %3) nounwind {
+define void @intrinsic_vsse_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
@@ -1261,7 +1261,7 @@ define void @intrinsic_vsse_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x
 entry:
   call void @llvm.riscv.vsse.nxv8f16(
     <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
+    ptr %1,
     iXLen %2,
     iXLen %3)
 
@@ -1270,12 +1270,12 @@ entry:
 
 declare void @llvm.riscv.vsse.mask.nxv8f16(
   <vscale x 8 x half>,
-  <vscale x 8 x half>*,
+  ptr,
   iXLen,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
@@ -1284,7 +1284,7 @@ define void @intrinsic_vsse_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vsca
 entry:
   call void @llvm.riscv.vsse.mask.nxv8f16(
     <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 8 x i1> %3,
     iXLen %4)
@@ -1294,11 +1294,11 @@ entry:
 
 declare void @llvm.riscv.vsse.nxv16f16(
   <vscale x 16 x half>,
-  <vscale x 16 x half>*,
+  ptr,
   iXLen,
   iXLen);
 
-define void @intrinsic_vsse_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, iXLen %2, iXLen %3) nounwind {
+define void @intrinsic_vsse_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
@@ -1307,7 +1307,7 @@ define void @intrinsic_vsse_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale
 entry:
   call void @llvm.riscv.vsse.nxv16f16(
     <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
+    ptr %1,
     iXLen %2,
     iXLen %3)
 
@@ -1316,12 +1316,12 @@ entry:
 
 declare void @llvm.riscv.vsse.mask.nxv16f16(
   <vscale x 16 x half>,
-  <vscale x 16 x half>*,
+  ptr,
   iXLen,
   <vscale x 16 x i1>,
   iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, ptr %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
@@ -1330,7 +1330,7 @@ define void @intrinsic_vsse_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <v
 entry:
   call void @llvm.riscv.vsse.mask.nxv16f16(
     <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 16 x i1> %3,
     iXLen %4)
@@ -1340,11 +1340,11 @@ entry:
 
 declare void @llvm.riscv.vsse.nxv32f16(
   <vscale x 32 x half>,
-  <vscale x 32 x half>*,
+  ptr,
   iXLen,
   iXLen);
 
-define void @intrinsic_vsse_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, iXLen %2, iXLen %3) nounwind {
+define void @intrinsic_vsse_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, ma
@@ -1353,7 +1353,7 @@ define void @intrinsic_vsse_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale
 entry:
   call void @llvm.riscv.vsse.nxv32f16(
     <vscale x 32 x half> %0,
-    <vscale x 32 x half>* %1,
+    ptr %1,
     iXLen %2,
     iXLen %3)
 
@@ -1362,12 +1362,12 @@ entry:
 
 declare void @llvm.riscv.vsse.mask.nxv32f16(
   <vscale x 32 x half>,
-  <vscale x 32 x half>*,
+  ptr,
   iXLen,
   <vscale x 32 x i1>,
   iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, ptr %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, ma
@@ -1376,7 +1376,7 @@ define void @intrinsic_vsse_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <v
 entry:
   call void @llvm.riscv.vsse.mask.nxv32f16(
     <vscale x 32 x half> %0,
-    <vscale x 32 x half>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 32 x i1> %3,
     iXLen %4)
@@ -1386,11 +1386,11 @@ entry:
 
 declare void @llvm.riscv.vsse.nxv1i8(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   iXLen,
   iXLen);
 
-define void @intrinsic_vsse_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, iXLen %2, iXLen %3) nounwind {
+define void @intrinsic_vsse_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
@@ -1399,7 +1399,7 @@ define void @intrinsic_vsse_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x
 entry:
   call void @llvm.riscv.vsse.nxv1i8(
     <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
+    ptr %1,
     iXLen %2,
     iXLen %3)
 
@@ -1408,12 +1408,12 @@ entry:
 
 declare void @llvm.riscv.vsse.mask.nxv1i8(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   iXLen,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
@@ -1422,7 +1422,7 @@ define void @intrinsic_vsse_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x
 entry:
   call void @llvm.riscv.vsse.mask.nxv1i8(
     <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -1432,11 +1432,11 @@ entry:
 
 declare void @llvm.riscv.vsse.nxv2i8(
   <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
+  ptr,
   iXLen,
   iXLen);
 
-define void @intrinsic_vsse_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, iXLen %2, iXLen %3) nounwind {
+define void @intrinsic_vsse_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
@@ -1445,7 +1445,7 @@ define void @intrinsic_vsse_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x
 entry:
   call void @llvm.riscv.vsse.nxv2i8(
     <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
+    ptr %1,
     iXLen %2,
     iXLen %3)
 
@@ -1454,12 +1454,12 @@ entry:
 
 declare void @llvm.riscv.vsse.mask.nxv2i8(
   <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
+  ptr,
   iXLen,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
@@ -1468,7 +1468,7 @@ define void @intrinsic_vsse_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x
 entry:
   call void @llvm.riscv.vsse.mask.nxv2i8(
     <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -1478,11 +1478,11 @@ entry:
 
 declare void @llvm.riscv.vsse.nxv4i8(
   <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
+  ptr,
   iXLen,
   iXLen);
 
-define void @intrinsic_vsse_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, iXLen %2, iXLen %3) nounwind {
+define void @intrinsic_vsse_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
@@ -1491,7 +1491,7 @@ define void @intrinsic_vsse_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x
 entry:
   call void @llvm.riscv.vsse.nxv4i8(
     <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
+    ptr %1,
     iXLen %2,
     iXLen %3)
 
@@ -1500,12 +1500,12 @@ entry:
 
 declare void @llvm.riscv.vsse.mask.nxv4i8(
   <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
+  ptr,
   iXLen,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
@@ -1514,7 +1514,7 @@ define void @intrinsic_vsse_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x
 entry:
   call void @llvm.riscv.vsse.mask.nxv4i8(
     <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -1524,11 +1524,11 @@ entry:
 
 declare void @llvm.riscv.vsse.nxv8i8(
   <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
+  ptr,
   iXLen,
   iXLen);
 
-define void @intrinsic_vsse_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, iXLen %2, iXLen %3) nounwind {
+define void @intrinsic_vsse_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, ma
@@ -1537,7 +1537,7 @@ define void @intrinsic_vsse_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x
 entry:
   call void @llvm.riscv.vsse.nxv8i8(
     <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
+    ptr %1,
     iXLen %2,
     iXLen %3)
 
@@ -1546,12 +1546,12 @@ entry:
 
 declare void @llvm.riscv.vsse.mask.nxv8i8(
   <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
+  ptr,
   iXLen,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, ma
@@ -1560,7 +1560,7 @@ define void @intrinsic_vsse_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x
 entry:
   call void @llvm.riscv.vsse.mask.nxv8i8(
     <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 8 x i1> %3,
     iXLen %4)
@@ -1570,11 +1570,11 @@ entry:
 
 declare void @llvm.riscv.vsse.nxv16i8(
   <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
+  ptr,
   iXLen,
   iXLen);
 
-define void @intrinsic_vsse_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, iXLen %2, iXLen %3) nounwind {
+define void @intrinsic_vsse_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
@@ -1583,7 +1583,7 @@ define void @intrinsic_vsse_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 1
 entry:
   call void @llvm.riscv.vsse.nxv16i8(
     <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
+    ptr %1,
     iXLen %2,
     iXLen %3)
 
@@ -1592,12 +1592,12 @@ entry:
 
 declare void @llvm.riscv.vsse.mask.nxv16i8(
   <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
+  ptr,
   iXLen,
   <vscale x 16 x i1>,
   iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
@@ -1606,7 +1606,7 @@ define void @intrinsic_vsse_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscal
 entry:
   call void @llvm.riscv.vsse.mask.nxv16i8(
     <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 16 x i1> %3,
     iXLen %4)
@@ -1616,11 +1616,11 @@ entry:
 
 declare void @llvm.riscv.vsse.nxv32i8(
   <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
+  ptr,
   iXLen,
   iXLen);
 
-define void @intrinsic_vsse_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, iXLen %2, iXLen %3) nounwind {
+define void @intrinsic_vsse_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
@@ -1629,7 +1629,7 @@ define void @intrinsic_vsse_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 3
 entry:
   call void @llvm.riscv.vsse.nxv32i8(
     <vscale x 32 x i8> %0,
-    <vscale x 32 x i8>* %1,
+    ptr %1,
     iXLen %2,
     iXLen %3)
 
@@ -1638,12 +1638,12 @@ entry:
 
 declare void @llvm.riscv.vsse.mask.nxv32i8(
   <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
+  ptr,
   iXLen,
   <vscale x 32 x i1>,
   iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
@@ -1652,7 +1652,7 @@ define void @intrinsic_vsse_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscal
 entry:
   call void @llvm.riscv.vsse.mask.nxv32i8(
     <vscale x 32 x i8> %0,
-    <vscale x 32 x i8>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 32 x i1> %3,
     iXLen %4)
@@ -1662,11 +1662,11 @@ entry:
 
 declare void @llvm.riscv.vsse.nxv64i8(
   <vscale x 64 x i8>,
-  <vscale x 64 x i8>*,
+  ptr,
   iXLen,
   iXLen);
 
-define void @intrinsic_vsse_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, iXLen %2, iXLen %3) nounwind {
+define void @intrinsic_vsse_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
@@ -1675,7 +1675,7 @@ define void @intrinsic_vsse_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 6
 entry:
   call void @llvm.riscv.vsse.nxv64i8(
     <vscale x 64 x i8> %0,
-    <vscale x 64 x i8>* %1,
+    ptr %1,
     iXLen %2,
     iXLen %3)
 
@@ -1684,12 +1684,12 @@ entry:
 
 declare void @llvm.riscv.vsse.mask.nxv64i8(
   <vscale x 64 x i8>,
-  <vscale x 64 x i8>*,
+  ptr,
   iXLen,
   <vscale x 64 x i1>,
   iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, iXLen %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, iXLen %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
@@ -1698,7 +1698,7 @@ define void @intrinsic_vsse_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscal
 entry:
   call void @llvm.riscv.vsse.mask.nxv64i8(
     <vscale x 64 x i8> %0,
-    <vscale x 64 x i8>* %1,
+    ptr %1,
     iXLen %2,
     <vscale x 64 x i1> %3,
     iXLen %4)

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll
index 9dfb53e933d8b..dbc7e719f14a8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll
@@ -6,11 +6,11 @@
 
 declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i64(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   <vscale x 1 x i64>,
   i64);
 
-define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -19,7 +19,7 @@ define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vsc
 entry:
   call void @llvm.riscv.vsuxei.nxv1i8.nxv1i64(
     <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
+    ptr %1,
     <vscale x 1 x i64> %2,
     i64 %3)
 
@@ -28,12 +28,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
   i64);
 
-define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -42,7 +42,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0,
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64(
     <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
+    ptr %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
     i64 %4)
@@ -52,11 +52,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i64(
   <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
+  ptr,
   <vscale x 2 x i64>,
   i64);
 
-define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -65,7 +65,7 @@ define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, <vsc
 entry:
   call void @llvm.riscv.vsuxei.nxv2i8.nxv2i64(
     <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
+    ptr %1,
     <vscale x 2 x i64> %2,
     i64 %3)
 
@@ -74,12 +74,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i64(
   <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
+  ptr,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
   i64);
 
-define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -88,7 +88,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0,
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i64(
     <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
+    ptr %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
     i64 %4)
@@ -98,11 +98,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i64(
   <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
+  ptr,
   <vscale x 4 x i64>,
   i64);
 
-define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -111,7 +111,7 @@ define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, <vsc
 entry:
   call void @llvm.riscv.vsuxei.nxv4i8.nxv4i64(
     <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
+    ptr %1,
     <vscale x 4 x i64> %2,
     i64 %3)
 
@@ -120,12 +120,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i64(
   <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
+  ptr,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
   i64);
 
-define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -134,7 +134,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0,
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i64(
     <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
+    ptr %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
     i64 %4)
@@ -144,11 +144,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i64(
   <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
+  ptr,
   <vscale x 8 x i64>,
   i64);
 
-define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -157,7 +157,7 @@ define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, <vsc
 entry:
   call void @llvm.riscv.vsuxei.nxv8i8.nxv8i64(
     <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
+    ptr %1,
     <vscale x 8 x i64> %2,
     i64 %3)
 
@@ -166,12 +166,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i64(
   <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
+  ptr,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
   i64);
 
-define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -180,7 +180,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0,
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i64(
     <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
+    ptr %1,
     <vscale x 8 x i64> %2,
     <vscale x 8 x i1> %3,
     i64 %4)
@@ -190,11 +190,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i64(
   <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
+  ptr,
   <vscale x 1 x i64>,
   i64);
 
-define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -203,7 +203,7 @@ define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, <
 entry:
   call void @llvm.riscv.vsuxei.nxv1i16.nxv1i64(
     <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
+    ptr %1,
     <vscale x 1 x i64> %2,
     i64 %3)
 
@@ -212,12 +212,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i64(
   <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
+  ptr,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
   i64);
 
-define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -226,7 +226,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i64(
     <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
+    ptr %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
     i64 %4)
@@ -236,11 +236,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i64(
   <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
+  ptr,
   <vscale x 2 x i64>,
   i64);
 
-define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -249,7 +249,7 @@ define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, <
 entry:
   call void @llvm.riscv.vsuxei.nxv2i16.nxv2i64(
     <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
+    ptr %1,
     <vscale x 2 x i64> %2,
     i64 %3)
 
@@ -258,12 +258,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i64(
   <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
+  ptr,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
   i64);
 
-define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -272,7 +272,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i64(
     <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
+    ptr %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
     i64 %4)
@@ -282,11 +282,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i64(
   <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
+  ptr,
   <vscale x 4 x i64>,
   i64);
 
-define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -295,7 +295,7 @@ define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, <
 entry:
   call void @llvm.riscv.vsuxei.nxv4i16.nxv4i64(
     <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
+    ptr %1,
     <vscale x 4 x i64> %2,
     i64 %3)
 
@@ -304,12 +304,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i64(
   <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
+  ptr,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
   i64);
 
-define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -318,7 +318,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i64(
     <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
+    ptr %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
     i64 %4)
@@ -328,11 +328,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i64(
   <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
+  ptr,
   <vscale x 8 x i64>,
   i64);
 
-define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -341,7 +341,7 @@ define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, <
 entry:
   call void @llvm.riscv.vsuxei.nxv8i16.nxv8i64(
     <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
+    ptr %1,
     <vscale x 8 x i64> %2,
     i64 %3)
 
@@ -350,12 +350,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i64(
   <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
+  ptr,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
   i64);
 
-define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -364,7 +364,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i64(
     <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
+    ptr %1,
     <vscale x 8 x i64> %2,
     <vscale x 8 x i1> %3,
     i64 %4)
@@ -374,11 +374,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i64(
   <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
+  ptr,
   <vscale x 1 x i64>,
   i64);
 
-define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -387,7 +387,7 @@ define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <
 entry:
   call void @llvm.riscv.vsuxei.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
+    ptr %1,
     <vscale x 1 x i64> %2,
     i64 %3)
 
@@ -396,12 +396,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i64(
   <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
+  ptr,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
   i64);
 
-define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -410,7 +410,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
+    ptr %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
     i64 %4)
@@ -420,11 +420,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i64(
   <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
+  ptr,
   <vscale x 2 x i64>,
   i64);
 
-define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -433,7 +433,7 @@ define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <
 entry:
   call void @llvm.riscv.vsuxei.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
+    ptr %1,
     <vscale x 2 x i64> %2,
     i64 %3)
 
@@ -442,12 +442,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i64(
   <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
+  ptr,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
   i64);
 
-define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -456,7 +456,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
+    ptr %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
     i64 %4)
@@ -466,11 +466,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i64(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   <vscale x 4 x i64>,
   i64);
 
-define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -479,7 +479,7 @@ define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <
 entry:
   call void @llvm.riscv.vsuxei.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
+    ptr %1,
     <vscale x 4 x i64> %2,
     i64 %3)
 
@@ -488,12 +488,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
   i64);
 
-define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -502,7 +502,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
+    ptr %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
     i64 %4)
@@ -512,11 +512,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i64(
   <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
+  ptr,
   <vscale x 8 x i64>,
   i64);
 
-define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -525,7 +525,7 @@ define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <
 entry:
   call void @llvm.riscv.vsuxei.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
+    ptr %1,
     <vscale x 8 x i64> %2,
     i64 %3)
 
@@ -534,12 +534,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i64(
   <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
+  ptr,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
   i64);
 
-define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -548,7 +548,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
+    ptr %1,
     <vscale x 8 x i64> %2,
     <vscale x 8 x i1> %3,
     i64 %4)
@@ -558,11 +558,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   <vscale x 1 x i64>,
   i64);
 
-define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -571,7 +571,7 @@ define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <
 entry:
   call void @llvm.riscv.vsuxei.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
+    ptr %1,
     <vscale x 1 x i64> %2,
     i64 %3)
 
@@ -580,12 +580,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
   i64);
 
-define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -594,7 +594,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
+    ptr %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
     i64 %4)
@@ -604,11 +604,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
+  ptr,
   <vscale x 2 x i64>,
   i64);
 
-define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -617,7 +617,7 @@ define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <
 entry:
   call void @llvm.riscv.vsuxei.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
+    ptr %1,
     <vscale x 2 x i64> %2,
     i64 %3)
 
@@ -626,12 +626,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
+  ptr,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
   i64);
 
-define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -640,7 +640,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
+    ptr %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
     i64 %4)
@@ -650,11 +650,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
+  ptr,
   <vscale x 4 x i64>,
   i64);
 
-define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -663,7 +663,7 @@ define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <
 entry:
   call void @llvm.riscv.vsuxei.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
+    ptr %1,
     <vscale x 4 x i64> %2,
     i64 %3)
 
@@ -672,12 +672,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
+  ptr,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
   i64);
 
-define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -686,7 +686,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
+    ptr %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
     i64 %4)
@@ -696,11 +696,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i64(
   <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
+  ptr,
   <vscale x 8 x i64>,
   i64);
 
-define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -709,7 +709,7 @@ define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <
 entry:
   call void @llvm.riscv.vsuxei.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
+    ptr %1,
     <vscale x 8 x i64> %2,
     i64 %3)
 
@@ -718,12 +718,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i64(
   <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
+  ptr,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
   i64);
 
-define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -732,7 +732,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
+    ptr %1,
     <vscale x 8 x i64> %2,
     <vscale x 8 x i1> %3,
     i64 %4)
@@ -742,11 +742,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i64(
   <vscale x 1 x half>,
-  <vscale x 1 x half>*,
+  ptr,
   <vscale x 1 x i64>,
   i64);
 
-define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -755,7 +755,7 @@ define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0,
 entry:
   call void @llvm.riscv.vsuxei.nxv1f16.nxv1i64(
     <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
+    ptr %1,
     <vscale x 1 x i64> %2,
     i64 %3)
 
@@ -764,12 +764,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i64(
   <vscale x 1 x half>,
-  <vscale x 1 x half>*,
+  ptr,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
   i64);
 
-define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -778,7 +778,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i64(
     <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
+    ptr %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
     i64 %4)
@@ -788,11 +788,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i64(
   <vscale x 2 x half>,
-  <vscale x 2 x half>*,
+  ptr,
   <vscale x 2 x i64>,
   i64);
 
-define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -801,7 +801,7 @@ define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0,
 entry:
   call void @llvm.riscv.vsuxei.nxv2f16.nxv2i64(
     <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
+    ptr %1,
     <vscale x 2 x i64> %2,
     i64 %3)
 
@@ -810,12 +810,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i64(
   <vscale x 2 x half>,
-  <vscale x 2 x half>*,
+  ptr,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
   i64);
 
-define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -824,7 +824,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i64(
     <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
+    ptr %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
     i64 %4)
@@ -834,11 +834,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i64(
   <vscale x 4 x half>,
-  <vscale x 4 x half>*,
+  ptr,
   <vscale x 4 x i64>,
   i64);
 
-define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -847,7 +847,7 @@ define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0,
 entry:
   call void @llvm.riscv.vsuxei.nxv4f16.nxv4i64(
     <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
+    ptr %1,
     <vscale x 4 x i64> %2,
     i64 %3)
 
@@ -856,12 +856,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i64(
   <vscale x 4 x half>,
-  <vscale x 4 x half>*,
+  ptr,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
   i64);
 
-define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -870,7 +870,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i64(
     <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
+    ptr %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
     i64 %4)
@@ -880,11 +880,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i64(
   <vscale x 8 x half>,
-  <vscale x 8 x half>*,
+  ptr,
   <vscale x 8 x i64>,
   i64);
 
-define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -893,7 +893,7 @@ define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0,
 entry:
   call void @llvm.riscv.vsuxei.nxv8f16.nxv8i64(
     <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
+    ptr %1,
     <vscale x 8 x i64> %2,
     i64 %3)
 
@@ -902,12 +902,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i64(
   <vscale x 8 x half>,
-  <vscale x 8 x half>*,
+  ptr,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
   i64);
 
-define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -916,7 +916,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i64(
     <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
+    ptr %1,
     <vscale x 8 x i64> %2,
     <vscale x 8 x i1> %3,
     i64 %4)
@@ -926,11 +926,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i64(
   <vscale x 1 x float>,
-  <vscale x 1 x float>*,
+  ptr,
   <vscale x 1 x i64>,
   i64);
 
-define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -939,7 +939,7 @@ define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0,
 entry:
   call void @llvm.riscv.vsuxei.nxv1f32.nxv1i64(
     <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
+    ptr %1,
     <vscale x 1 x i64> %2,
     i64 %3)
 
@@ -948,12 +948,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i64(
   <vscale x 1 x float>,
-  <vscale x 1 x float>*,
+  ptr,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
   i64);
 
-define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -962,7 +962,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i64(
     <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
+    ptr %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
     i64 %4)
@@ -972,11 +972,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i64(
   <vscale x 2 x float>,
-  <vscale x 2 x float>*,
+  ptr,
   <vscale x 2 x i64>,
   i64);
 
-define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -985,7 +985,7 @@ define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0,
 entry:
   call void @llvm.riscv.vsuxei.nxv2f32.nxv2i64(
     <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
+    ptr %1,
     <vscale x 2 x i64> %2,
     i64 %3)
 
@@ -994,12 +994,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i64(
   <vscale x 2 x float>,
-  <vscale x 2 x float>*,
+  ptr,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
   i64);
 
-define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1008,7 +1008,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i64(
     <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
+    ptr %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
     i64 %4)
@@ -1018,11 +1018,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i64(
   <vscale x 4 x float>,
-  <vscale x 4 x float>*,
+  ptr,
   <vscale x 4 x i64>,
   i64);
 
-define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1031,7 +1031,7 @@ define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0,
 entry:
   call void @llvm.riscv.vsuxei.nxv4f32.nxv4i64(
     <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
+    ptr %1,
     <vscale x 4 x i64> %2,
     i64 %3)
 
@@ -1040,12 +1040,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i64(
   <vscale x 4 x float>,
-  <vscale x 4 x float>*,
+  ptr,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
   i64);
 
-define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1054,7 +1054,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i64(
     <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
+    ptr %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
     i64 %4)
@@ -1064,11 +1064,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i64(
   <vscale x 8 x float>,
-  <vscale x 8 x float>*,
+  ptr,
   <vscale x 8 x i64>,
   i64);
 
-define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1077,7 +1077,7 @@ define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0,
 entry:
   call void @llvm.riscv.vsuxei.nxv8f32.nxv8i64(
     <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
+    ptr %1,
     <vscale x 8 x i64> %2,
     i64 %3)
 
@@ -1086,12 +1086,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i64(
   <vscale x 8 x float>,
-  <vscale x 8 x float>*,
+  ptr,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
   i64);
 
-define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1100,7 +1100,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i64(
     <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
+    ptr %1,
     <vscale x 8 x i64> %2,
     <vscale x 8 x i1> %3,
     i64 %4)
@@ -1110,11 +1110,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i64(
   <vscale x 1 x double>,
-  <vscale x 1 x double>*,
+  ptr,
   <vscale x 1 x i64>,
   i64);
 
-define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -1123,7 +1123,7 @@ define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0
 entry:
   call void @llvm.riscv.vsuxei.nxv1f64.nxv1i64(
     <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
+    ptr %1,
     <vscale x 1 x i64> %2,
     i64 %3)
 
@@ -1132,12 +1132,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i64(
   <vscale x 1 x double>,
-  <vscale x 1 x double>*,
+  ptr,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
   i64);
 
-define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -1146,7 +1146,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x doubl
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i64(
     <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
+    ptr %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
     i64 %4)
@@ -1156,11 +1156,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i64(
   <vscale x 2 x double>,
-  <vscale x 2 x double>*,
+  ptr,
   <vscale x 2 x i64>,
   i64);
 
-define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -1169,7 +1169,7 @@ define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0
 entry:
   call void @llvm.riscv.vsuxei.nxv2f64.nxv2i64(
     <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
+    ptr %1,
     <vscale x 2 x i64> %2,
     i64 %3)
 
@@ -1178,12 +1178,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i64(
   <vscale x 2 x double>,
-  <vscale x 2 x double>*,
+  ptr,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
   i64);
 
-define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -1192,7 +1192,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x doubl
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i64(
     <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
+    ptr %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
     i64 %4)
@@ -1202,11 +1202,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i64(
   <vscale x 4 x double>,
-  <vscale x 4 x double>*,
+  ptr,
   <vscale x 4 x i64>,
   i64);
 
-define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -1215,7 +1215,7 @@ define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0
 entry:
   call void @llvm.riscv.vsuxei.nxv4f64.nxv4i64(
     <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
+    ptr %1,
     <vscale x 4 x i64> %2,
     i64 %3)
 
@@ -1224,12 +1224,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i64(
   <vscale x 4 x double>,
-  <vscale x 4 x double>*,
+  ptr,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
   i64);
 
-define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -1238,7 +1238,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x doubl
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i64(
     <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
+    ptr %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
     i64 %4)
@@ -1248,11 +1248,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i64(
   <vscale x 8 x double>,
-  <vscale x 8 x double>*,
+  ptr,
   <vscale x 8 x i64>,
   i64);
 
-define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -1261,7 +1261,7 @@ define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0
 entry:
   call void @llvm.riscv.vsuxei.nxv8f64.nxv8i64(
     <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
+    ptr %1,
     <vscale x 8 x i64> %2,
     i64 %3)
 
@@ -1270,12 +1270,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i64(
   <vscale x 8 x double>,
-  <vscale x 8 x double>*,
+  ptr,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
   i64);
 
-define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -1284,7 +1284,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x doubl
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i64(
     <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
+    ptr %1,
     <vscale x 8 x i64> %2,
     <vscale x 8 x i1> %3,
     i64 %4)

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsuxei.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxei.ll
index dd7d91f7cf2a8..7413177918e63 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsuxei.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsuxei.ll
@@ -6,11 +6,11 @@
 
 declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i32(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   <vscale x 1 x i32>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -19,7 +19,7 @@ define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vsc
 entry:
   call void @llvm.riscv.vsuxei.nxv1i8.nxv1i32(
     <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
+    ptr %1,
     <vscale x 1 x i32> %2,
     iXLen %3)
 
@@ -28,12 +28,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i32(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -42,7 +42,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0,
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i32(
     <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
+    ptr %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -52,11 +52,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i32(
   <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
+  ptr,
   <vscale x 2 x i32>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -65,7 +65,7 @@ define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vsc
 entry:
   call void @llvm.riscv.vsuxei.nxv2i8.nxv2i32(
     <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
+    ptr %1,
     <vscale x 2 x i32> %2,
     iXLen %3)
 
@@ -74,12 +74,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i32(
   <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
+  ptr,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -88,7 +88,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0,
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i32(
     <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
+    ptr %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -98,11 +98,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i32(
   <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
+  ptr,
   <vscale x 4 x i32>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -111,7 +111,7 @@ define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vsc
 entry:
   call void @llvm.riscv.vsuxei.nxv4i8.nxv4i32(
     <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
+    ptr %1,
     <vscale x 4 x i32> %2,
     iXLen %3)
 
@@ -120,12 +120,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i32(
   <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
+  ptr,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -134,7 +134,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0,
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i32(
     <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
+    ptr %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -144,11 +144,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i32(
   <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
+  ptr,
   <vscale x 8 x i32>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -157,7 +157,7 @@ define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vsc
 entry:
   call void @llvm.riscv.vsuxei.nxv8i8.nxv8i32(
     <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
+    ptr %1,
     <vscale x 8 x i32> %2,
     iXLen %3)
 
@@ -166,12 +166,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i32(
   <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
+  ptr,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -180,7 +180,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0,
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i32(
     <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
+    ptr %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
     iXLen %4)
@@ -190,11 +190,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv16i8.nxv16i32(
   <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
+  ptr,
   <vscale x 16 x i32>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -203,7 +203,7 @@ define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0,
 entry:
   call void @llvm.riscv.vsuxei.nxv16i8.nxv16i32(
     <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
+    ptr %1,
     <vscale x 16 x i32> %2,
     iXLen %3)
 
@@ -212,12 +212,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i32(
   <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
+  ptr,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -226,7 +226,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i32(
     <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
+    ptr %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
     iXLen %4)
@@ -236,11 +236,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i32(
   <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
+  ptr,
   <vscale x 1 x i32>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -249,7 +249,7 @@ define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <
 entry:
   call void @llvm.riscv.vsuxei.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
+    ptr %1,
     <vscale x 1 x i32> %2,
     iXLen %3)
 
@@ -258,12 +258,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i32(
   <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
+  ptr,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -272,7 +272,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
+    ptr %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -282,11 +282,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i32(
   <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
+  ptr,
   <vscale x 2 x i32>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -295,7 +295,7 @@ define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <
 entry:
   call void @llvm.riscv.vsuxei.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
+    ptr %1,
     <vscale x 2 x i32> %2,
     iXLen %3)
 
@@ -304,12 +304,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i32(
   <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
+  ptr,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -318,7 +318,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
+    ptr %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -328,11 +328,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i32(
   <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
+  ptr,
   <vscale x 4 x i32>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -341,7 +341,7 @@ define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <
 entry:
   call void @llvm.riscv.vsuxei.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
+    ptr %1,
     <vscale x 4 x i32> %2,
     iXLen %3)
 
@@ -350,12 +350,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i32(
   <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
+  ptr,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -364,7 +364,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
+    ptr %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -374,11 +374,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i32(
   <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
+  ptr,
   <vscale x 8 x i32>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -387,7 +387,7 @@ define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <
 entry:
   call void @llvm.riscv.vsuxei.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
+    ptr %1,
     <vscale x 8 x i32> %2,
     iXLen %3)
 
@@ -396,12 +396,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i32(
   <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
+  ptr,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -410,7 +410,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
+    ptr %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
     iXLen %4)
@@ -420,11 +420,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv16i16.nxv16i32(
   <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
+  ptr,
   <vscale x 16 x i32>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -433,7 +433,7 @@ define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %
 entry:
   call void @llvm.riscv.vsuxei.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
+    ptr %1,
     <vscale x 16 x i32> %2,
     iXLen %3)
 
@@ -442,12 +442,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i32(
   <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
+  ptr,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -456,7 +456,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
+    ptr %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
     iXLen %4)
@@ -466,11 +466,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
+  ptr,
   <vscale x 1 x i32>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -479,7 +479,7 @@ define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <
 entry:
   call void @llvm.riscv.vsuxei.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
+    ptr %1,
     <vscale x 1 x i32> %2,
     iXLen %3)
 
@@ -488,12 +488,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
+  ptr,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -502,7 +502,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
+    ptr %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -512,11 +512,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
+  ptr,
   <vscale x 2 x i32>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -525,7 +525,7 @@ define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <
 entry:
   call void @llvm.riscv.vsuxei.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
+    ptr %1,
     <vscale x 2 x i32> %2,
     iXLen %3)
 
@@ -534,12 +534,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
+  ptr,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -548,7 +548,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
+    ptr %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -558,11 +558,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   <vscale x 4 x i32>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -571,7 +571,7 @@ define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <
 entry:
   call void @llvm.riscv.vsuxei.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
+    ptr %1,
     <vscale x 4 x i32> %2,
     iXLen %3)
 
@@ -580,12 +580,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -594,7 +594,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
+    ptr %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -604,11 +604,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
+  ptr,
   <vscale x 8 x i32>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -617,7 +617,7 @@ define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <
 entry:
   call void @llvm.riscv.vsuxei.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
+    ptr %1,
     <vscale x 8 x i32> %2,
     iXLen %3)
 
@@ -626,12 +626,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
+  ptr,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -640,7 +640,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
+    ptr %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
     iXLen %4)
@@ -650,11 +650,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
+  ptr,
   <vscale x 16 x i32>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -663,7 +663,7 @@ define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %
 entry:
   call void @llvm.riscv.vsuxei.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
+    ptr %1,
     <vscale x 16 x i32> %2,
     iXLen %3)
 
@@ -672,12 +672,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
+  ptr,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -686,7 +686,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
+    ptr %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
     iXLen %4)
@@ -696,11 +696,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i32(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   <vscale x 1 x i32>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -709,7 +709,7 @@ define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <
 entry:
   call void @llvm.riscv.vsuxei.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
+    ptr %1,
     <vscale x 1 x i32> %2,
     iXLen %3)
 
@@ -718,12 +718,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i32(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -732,7 +732,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
+    ptr %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -742,11 +742,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i32(
   <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
+  ptr,
   <vscale x 2 x i32>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -755,7 +755,7 @@ define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <
 entry:
   call void @llvm.riscv.vsuxei.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
+    ptr %1,
     <vscale x 2 x i32> %2,
     iXLen %3)
 
@@ -764,12 +764,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i32(
   <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
+  ptr,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -778,7 +778,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
+    ptr %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -788,11 +788,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i32(
   <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
+  ptr,
   <vscale x 4 x i32>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -801,7 +801,7 @@ define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <
 entry:
   call void @llvm.riscv.vsuxei.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
+    ptr %1,
     <vscale x 4 x i32> %2,
     iXLen %3)
 
@@ -810,12 +810,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i32(
   <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
+  ptr,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -824,7 +824,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
+    ptr %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -834,11 +834,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i32(
   <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
+  ptr,
   <vscale x 8 x i32>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -847,7 +847,7 @@ define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <
 entry:
   call void @llvm.riscv.vsuxei.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
+    ptr %1,
     <vscale x 8 x i32> %2,
     iXLen %3)
 
@@ -856,12 +856,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i32(
   <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
+  ptr,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -870,7 +870,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
+    ptr %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
     iXLen %4)
@@ -880,11 +880,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i32(
   <vscale x 1 x half>,
-  <vscale x 1 x half>*,
+  ptr,
   <vscale x 1 x i32>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -893,7 +893,7 @@ define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0,
 entry:
   call void @llvm.riscv.vsuxei.nxv1f16.nxv1i32(
     <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
+    ptr %1,
     <vscale x 1 x i32> %2,
     iXLen %3)
 
@@ -902,12 +902,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i32(
   <vscale x 1 x half>,
-  <vscale x 1 x half>*,
+  ptr,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -916,7 +916,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i32(
     <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
+    ptr %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -926,11 +926,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i32(
   <vscale x 2 x half>,
-  <vscale x 2 x half>*,
+  ptr,
   <vscale x 2 x i32>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -939,7 +939,7 @@ define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0,
 entry:
   call void @llvm.riscv.vsuxei.nxv2f16.nxv2i32(
     <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
+    ptr %1,
     <vscale x 2 x i32> %2,
     iXLen %3)
 
@@ -948,12 +948,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i32(
   <vscale x 2 x half>,
-  <vscale x 2 x half>*,
+  ptr,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -962,7 +962,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i32(
     <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
+    ptr %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -972,11 +972,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i32(
   <vscale x 4 x half>,
-  <vscale x 4 x half>*,
+  ptr,
   <vscale x 4 x i32>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -985,7 +985,7 @@ define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0,
 entry:
   call void @llvm.riscv.vsuxei.nxv4f16.nxv4i32(
     <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
+    ptr %1,
     <vscale x 4 x i32> %2,
     iXLen %3)
 
@@ -994,12 +994,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i32(
   <vscale x 4 x half>,
-  <vscale x 4 x half>*,
+  ptr,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1008,7 +1008,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i32(
     <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
+    ptr %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -1018,11 +1018,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i32(
   <vscale x 8 x half>,
-  <vscale x 8 x half>*,
+  ptr,
   <vscale x 8 x i32>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1031,7 +1031,7 @@ define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0,
 entry:
   call void @llvm.riscv.vsuxei.nxv8f16.nxv8i32(
     <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
+    ptr %1,
     <vscale x 8 x i32> %2,
     iXLen %3)
 
@@ -1040,12 +1040,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i32(
   <vscale x 8 x half>,
-  <vscale x 8 x half>*,
+  ptr,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1054,7 +1054,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i32(
     <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
+    ptr %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
     iXLen %4)
@@ -1064,11 +1064,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv16f16.nxv16i32(
   <vscale x 16 x half>,
-  <vscale x 16 x half>*,
+  ptr,
   <vscale x 16 x i32>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1077,7 +1077,7 @@ define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half>
 entry:
   call void @llvm.riscv.vsuxei.nxv16f16.nxv16i32(
     <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
+    ptr %1,
     <vscale x 16 x i32> %2,
     iXLen %3)
 
@@ -1086,12 +1086,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i32(
   <vscale x 16 x half>,
-  <vscale x 16 x half>*,
+  ptr,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1100,7 +1100,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x h
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i32(
     <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
+    ptr %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
     iXLen %4)
@@ -1110,11 +1110,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i32(
   <vscale x 1 x float>,
-  <vscale x 1 x float>*,
+  ptr,
   <vscale x 1 x i32>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1123,7 +1123,7 @@ define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0,
 entry:
   call void @llvm.riscv.vsuxei.nxv1f32.nxv1i32(
     <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
+    ptr %1,
     <vscale x 1 x i32> %2,
     iXLen %3)
 
@@ -1132,12 +1132,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i32(
   <vscale x 1 x float>,
-  <vscale x 1 x float>*,
+  ptr,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1146,7 +1146,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i32(
     <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
+    ptr %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -1156,11 +1156,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i32(
   <vscale x 2 x float>,
-  <vscale x 2 x float>*,
+  ptr,
   <vscale x 2 x i32>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1169,7 +1169,7 @@ define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0,
 entry:
   call void @llvm.riscv.vsuxei.nxv2f32.nxv2i32(
     <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
+    ptr %1,
     <vscale x 2 x i32> %2,
     iXLen %3)
 
@@ -1178,12 +1178,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i32(
   <vscale x 2 x float>,
-  <vscale x 2 x float>*,
+  ptr,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1192,7 +1192,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i32(
     <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
+    ptr %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -1202,11 +1202,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i32(
   <vscale x 4 x float>,
-  <vscale x 4 x float>*,
+  ptr,
   <vscale x 4 x i32>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1215,7 +1215,7 @@ define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0,
 entry:
   call void @llvm.riscv.vsuxei.nxv4f32.nxv4i32(
     <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
+    ptr %1,
     <vscale x 4 x i32> %2,
     iXLen %3)
 
@@ -1224,12 +1224,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i32(
   <vscale x 4 x float>,
-  <vscale x 4 x float>*,
+  ptr,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1238,7 +1238,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i32(
     <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
+    ptr %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -1248,11 +1248,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i32(
   <vscale x 8 x float>,
-  <vscale x 8 x float>*,
+  ptr,
   <vscale x 8 x i32>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1261,7 +1261,7 @@ define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0,
 entry:
   call void @llvm.riscv.vsuxei.nxv8f32.nxv8i32(
     <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
+    ptr %1,
     <vscale x 8 x i32> %2,
     iXLen %3)
 
@@ -1270,12 +1270,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i32(
   <vscale x 8 x float>,
-  <vscale x 8 x float>*,
+  ptr,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1284,7 +1284,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i32(
     <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
+    ptr %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
     iXLen %4)
@@ -1294,11 +1294,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv16f32.nxv16i32(
   <vscale x 16 x float>,
-  <vscale x 16 x float>*,
+  ptr,
   <vscale x 16 x i32>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -1307,7 +1307,7 @@ define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float>
 entry:
   call void @llvm.riscv.vsuxei.nxv16f32.nxv16i32(
     <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
+    ptr %1,
     <vscale x 16 x i32> %2,
     iXLen %3)
 
@@ -1316,12 +1316,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i32(
   <vscale x 16 x float>,
-  <vscale x 16 x float>*,
+  ptr,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -1330,7 +1330,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x f
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i32(
     <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
+    ptr %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
     iXLen %4)
@@ -1340,11 +1340,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i32(
   <vscale x 1 x double>,
-  <vscale x 1 x double>*,
+  ptr,
   <vscale x 1 x i32>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -1353,7 +1353,7 @@ define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0
 entry:
   call void @llvm.riscv.vsuxei.nxv1f64.nxv1i32(
     <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
+    ptr %1,
     <vscale x 1 x i32> %2,
     iXLen %3)
 
@@ -1362,12 +1362,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i32(
   <vscale x 1 x double>,
-  <vscale x 1 x double>*,
+  ptr,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -1376,7 +1376,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x doubl
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i32(
     <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
+    ptr %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -1386,11 +1386,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i32(
   <vscale x 2 x double>,
-  <vscale x 2 x double>*,
+  ptr,
   <vscale x 2 x i32>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -1399,7 +1399,7 @@ define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0
 entry:
   call void @llvm.riscv.vsuxei.nxv2f64.nxv2i32(
     <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
+    ptr %1,
     <vscale x 2 x i32> %2,
     iXLen %3)
 
@@ -1408,12 +1408,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i32(
   <vscale x 2 x double>,
-  <vscale x 2 x double>*,
+  ptr,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -1422,7 +1422,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x doubl
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i32(
     <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
+    ptr %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -1432,11 +1432,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i32(
   <vscale x 4 x double>,
-  <vscale x 4 x double>*,
+  ptr,
   <vscale x 4 x i32>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -1445,7 +1445,7 @@ define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0
 entry:
   call void @llvm.riscv.vsuxei.nxv4f64.nxv4i32(
     <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
+    ptr %1,
     <vscale x 4 x i32> %2,
     iXLen %3)
 
@@ -1454,12 +1454,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i32(
   <vscale x 4 x double>,
-  <vscale x 4 x double>*,
+  ptr,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -1468,7 +1468,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x doubl
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i32(
     <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
+    ptr %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -1478,11 +1478,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i32(
   <vscale x 8 x double>,
-  <vscale x 8 x double>*,
+  ptr,
   <vscale x 8 x i32>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -1491,7 +1491,7 @@ define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0
 entry:
   call void @llvm.riscv.vsuxei.nxv8f64.nxv8i32(
     <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
+    ptr %1,
     <vscale x 8 x i32> %2,
     iXLen %3)
 
@@ -1500,12 +1500,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i32(
   <vscale x 8 x double>,
-  <vscale x 8 x double>*,
+  ptr,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -1514,7 +1514,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x doubl
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i32(
     <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
+    ptr %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
     iXLen %4)
@@ -1524,11 +1524,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i16(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   <vscale x 1 x i16>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -1537,7 +1537,7 @@ define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vsc
 entry:
   call void @llvm.riscv.vsuxei.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
+    ptr %1,
     <vscale x 1 x i16> %2,
     iXLen %3)
 
@@ -1546,12 +1546,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i16(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -1560,7 +1560,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0,
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
+    ptr %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -1570,11 +1570,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i16(
   <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
+  ptr,
   <vscale x 2 x i16>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -1583,7 +1583,7 @@ define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vsc
 entry:
   call void @llvm.riscv.vsuxei.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
+    ptr %1,
     <vscale x 2 x i16> %2,
     iXLen %3)
 
@@ -1592,12 +1592,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i16(
   <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
+  ptr,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -1606,7 +1606,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0,
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
+    ptr %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -1616,11 +1616,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i16(
   <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
+  ptr,
   <vscale x 4 x i16>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -1629,7 +1629,7 @@ define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vsc
 entry:
   call void @llvm.riscv.vsuxei.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
+    ptr %1,
     <vscale x 4 x i16> %2,
     iXLen %3)
 
@@ -1638,12 +1638,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i16(
   <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
+  ptr,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -1652,7 +1652,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0,
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
+    ptr %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -1662,11 +1662,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i16(
   <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
+  ptr,
   <vscale x 8 x i16>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -1675,7 +1675,7 @@ define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vsc
 entry:
   call void @llvm.riscv.vsuxei.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
+    ptr %1,
     <vscale x 8 x i16> %2,
     iXLen %3)
 
@@ -1684,12 +1684,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i16(
   <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
+  ptr,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -1698,7 +1698,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0,
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
+    ptr %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
     iXLen %4)
@@ -1708,11 +1708,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv16i8.nxv16i16(
   <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
+  ptr,
   <vscale x 16 x i16>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -1721,7 +1721,7 @@ define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0,
 entry:
   call void @llvm.riscv.vsuxei.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
+    ptr %1,
     <vscale x 16 x i16> %2,
     iXLen %3)
 
@@ -1730,12 +1730,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i16(
   <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
+  ptr,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -1744,7 +1744,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
+    ptr %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
     iXLen %4)
@@ -1754,11 +1754,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv32i8.nxv32i16(
   <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
+  ptr,
   <vscale x 32 x i16>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -1767,7 +1767,7 @@ define void @intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0,
 entry:
   call void @llvm.riscv.vsuxei.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
-    <vscale x 32 x i8>* %1,
+    ptr %1,
     <vscale x 32 x i16> %2,
     iXLen %3)
 
@@ -1776,12 +1776,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i16(
   <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
+  ptr,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -1790,7 +1790,7 @@ define void @intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
-    <vscale x 32 x i8>* %1,
+    ptr %1,
     <vscale x 32 x i16> %2,
     <vscale x 32 x i1> %3,
     iXLen %4)
@@ -1800,11 +1800,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
+  ptr,
   <vscale x 1 x i16>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1813,7 +1813,7 @@ define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <
 entry:
   call void @llvm.riscv.vsuxei.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
+    ptr %1,
     <vscale x 1 x i16> %2,
     iXLen %3)
 
@@ -1822,12 +1822,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
+  ptr,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1836,7 +1836,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
+    ptr %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -1846,11 +1846,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
+  ptr,
   <vscale x 2 x i16>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1859,7 +1859,7 @@ define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <
 entry:
   call void @llvm.riscv.vsuxei.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
+    ptr %1,
     <vscale x 2 x i16> %2,
     iXLen %3)
 
@@ -1868,12 +1868,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
+  ptr,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1882,7 +1882,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
+    ptr %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -1892,11 +1892,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
+  ptr,
   <vscale x 4 x i16>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1905,7 +1905,7 @@ define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <
 entry:
   call void @llvm.riscv.vsuxei.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
+    ptr %1,
     <vscale x 4 x i16> %2,
     iXLen %3)
 
@@ -1914,12 +1914,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
+  ptr,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1928,7 +1928,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
+    ptr %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -1938,11 +1938,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
+  ptr,
   <vscale x 8 x i16>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1951,7 +1951,7 @@ define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <
 entry:
   call void @llvm.riscv.vsuxei.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
+    ptr %1,
     <vscale x 8 x i16> %2,
     iXLen %3)
 
@@ -1960,12 +1960,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
+  ptr,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1974,7 +1974,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
+    ptr %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
     iXLen %4)
@@ -1984,11 +1984,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
+  ptr,
   <vscale x 16 x i16>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1997,7 +1997,7 @@ define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %
 entry:
   call void @llvm.riscv.vsuxei.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
+    ptr %1,
     <vscale x 16 x i16> %2,
     iXLen %3)
 
@@ -2006,12 +2006,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
+  ptr,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -2020,7 +2020,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
+    ptr %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
     iXLen %4)
@@ -2030,11 +2030,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
+  ptr,
   <vscale x 32 x i16>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -2043,7 +2043,7 @@ define void @intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %
 entry:
   call void @llvm.riscv.vsuxei.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
-    <vscale x 32 x i16>* %1,
+    ptr %1,
     <vscale x 32 x i16> %2,
     iXLen %3)
 
@@ -2052,12 +2052,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
+  ptr,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -2066,7 +2066,7 @@ define void @intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
-    <vscale x 32 x i16>* %1,
+    ptr %1,
     <vscale x 32 x i16> %2,
     <vscale x 32 x i1> %3,
     iXLen %4)
@@ -2076,11 +2076,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i16(
   <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
+  ptr,
   <vscale x 1 x i16>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -2089,7 +2089,7 @@ define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <
 entry:
   call void @llvm.riscv.vsuxei.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
+    ptr %1,
     <vscale x 1 x i16> %2,
     iXLen %3)
 
@@ -2098,12 +2098,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16(
   <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
+  ptr,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -2112,7 +2112,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
+    ptr %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -2122,11 +2122,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i16(
   <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
+  ptr,
   <vscale x 2 x i16>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -2135,7 +2135,7 @@ define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <
 entry:
   call void @llvm.riscv.vsuxei.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
+    ptr %1,
     <vscale x 2 x i16> %2,
     iXLen %3)
 
@@ -2144,12 +2144,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i16(
   <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
+  ptr,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -2158,7 +2158,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
+    ptr %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -2168,11 +2168,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i16(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   <vscale x 4 x i16>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -2181,7 +2181,7 @@ define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <
 entry:
   call void @llvm.riscv.vsuxei.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
+    ptr %1,
     <vscale x 4 x i16> %2,
     iXLen %3)
 
@@ -2190,12 +2190,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i16(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -2204,7 +2204,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
+    ptr %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -2214,11 +2214,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i16(
   <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
+  ptr,
   <vscale x 8 x i16>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -2227,7 +2227,7 @@ define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <
 entry:
   call void @llvm.riscv.vsuxei.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
+    ptr %1,
     <vscale x 8 x i16> %2,
     iXLen %3)
 
@@ -2236,12 +2236,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i16(
   <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
+  ptr,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -2250,7 +2250,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
+    ptr %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
     iXLen %4)
@@ -2260,11 +2260,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv16i32.nxv16i16(
   <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
+  ptr,
   <vscale x 16 x i16>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -2273,7 +2273,7 @@ define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %
 entry:
   call void @llvm.riscv.vsuxei.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
+    ptr %1,
     <vscale x 16 x i16> %2,
     iXLen %3)
 
@@ -2282,12 +2282,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i16(
   <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
+  ptr,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -2296,7 +2296,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
+    ptr %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
     iXLen %4)
@@ -2306,11 +2306,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i16(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   <vscale x 1 x i16>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -2319,7 +2319,7 @@ define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <
 entry:
   call void @llvm.riscv.vsuxei.nxv1i64.nxv1i16(
     <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
+    ptr %1,
     <vscale x 1 x i16> %2,
     iXLen %3)
 
@@ -2328,12 +2328,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i16(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -2342,7 +2342,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
+    ptr %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -2352,11 +2352,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i16(
   <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
+  ptr,
   <vscale x 2 x i16>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -2365,7 +2365,7 @@ define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <
 entry:
   call void @llvm.riscv.vsuxei.nxv2i64.nxv2i16(
     <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
+    ptr %1,
     <vscale x 2 x i16> %2,
     iXLen %3)
 
@@ -2374,12 +2374,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i16(
   <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
+  ptr,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -2388,7 +2388,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
+    ptr %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -2398,11 +2398,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i16(
   <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
+  ptr,
   <vscale x 4 x i16>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -2411,7 +2411,7 @@ define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <
 entry:
   call void @llvm.riscv.vsuxei.nxv4i64.nxv4i16(
     <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
+    ptr %1,
     <vscale x 4 x i16> %2,
     iXLen %3)
 
@@ -2420,12 +2420,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i16(
   <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
+  ptr,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -2434,7 +2434,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
+    ptr %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -2444,11 +2444,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i16(
   <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
+  ptr,
   <vscale x 8 x i16>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -2457,7 +2457,7 @@ define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <
 entry:
   call void @llvm.riscv.vsuxei.nxv8i64.nxv8i16(
     <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
+    ptr %1,
     <vscale x 8 x i16> %2,
     iXLen %3)
 
@@ -2466,12 +2466,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i16(
   <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
+  ptr,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -2480,7 +2480,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
+    ptr %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
     iXLen %4)
@@ -2490,11 +2490,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i16(
   <vscale x 1 x half>,
-  <vscale x 1 x half>*,
+  ptr,
   <vscale x 1 x i16>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -2503,7 +2503,7 @@ define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0,
 entry:
   call void @llvm.riscv.vsuxei.nxv1f16.nxv1i16(
     <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
+    ptr %1,
     <vscale x 1 x i16> %2,
     iXLen %3)
 
@@ -2512,12 +2512,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i16(
   <vscale x 1 x half>,
-  <vscale x 1 x half>*,
+  ptr,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -2526,7 +2526,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i16(
     <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
+    ptr %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -2536,11 +2536,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i16(
   <vscale x 2 x half>,
-  <vscale x 2 x half>*,
+  ptr,
   <vscale x 2 x i16>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -2549,7 +2549,7 @@ define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0,
 entry:
   call void @llvm.riscv.vsuxei.nxv2f16.nxv2i16(
     <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
+    ptr %1,
     <vscale x 2 x i16> %2,
     iXLen %3)
 
@@ -2558,12 +2558,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i16(
   <vscale x 2 x half>,
-  <vscale x 2 x half>*,
+  ptr,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -2572,7 +2572,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i16(
     <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
+    ptr %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -2582,11 +2582,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i16(
   <vscale x 4 x half>,
-  <vscale x 4 x half>*,
+  ptr,
   <vscale x 4 x i16>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -2595,7 +2595,7 @@ define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0,
 entry:
   call void @llvm.riscv.vsuxei.nxv4f16.nxv4i16(
     <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
+    ptr %1,
     <vscale x 4 x i16> %2,
     iXLen %3)
 
@@ -2604,12 +2604,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i16(
   <vscale x 4 x half>,
-  <vscale x 4 x half>*,
+  ptr,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -2618,7 +2618,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i16(
     <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
+    ptr %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -2628,11 +2628,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i16(
   <vscale x 8 x half>,
-  <vscale x 8 x half>*,
+  ptr,
   <vscale x 8 x i16>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -2641,7 +2641,7 @@ define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0,
 entry:
   call void @llvm.riscv.vsuxei.nxv8f16.nxv8i16(
     <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
+    ptr %1,
     <vscale x 8 x i16> %2,
     iXLen %3)
 
@@ -2650,12 +2650,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i16(
   <vscale x 8 x half>,
-  <vscale x 8 x half>*,
+  ptr,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -2664,7 +2664,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i16(
     <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
+    ptr %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
     iXLen %4)
@@ -2674,11 +2674,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv16f16.nxv16i16(
   <vscale x 16 x half>,
-  <vscale x 16 x half>*,
+  ptr,
   <vscale x 16 x i16>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -2687,7 +2687,7 @@ define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half>
 entry:
   call void @llvm.riscv.vsuxei.nxv16f16.nxv16i16(
     <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
+    ptr %1,
     <vscale x 16 x i16> %2,
     iXLen %3)
 
@@ -2696,12 +2696,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i16(
   <vscale x 16 x half>,
-  <vscale x 16 x half>*,
+  ptr,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -2710,7 +2710,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x h
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i16(
     <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
+    ptr %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
     iXLen %4)
@@ -2720,11 +2720,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv32f16.nxv32i16(
   <vscale x 32 x half>,
-  <vscale x 32 x half>*,
+  ptr,
   <vscale x 32 x i16>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -2733,7 +2733,7 @@ define void @intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half>
 entry:
   call void @llvm.riscv.vsuxei.nxv32f16.nxv32i16(
     <vscale x 32 x half> %0,
-    <vscale x 32 x half>* %1,
+    ptr %1,
     <vscale x 32 x i16> %2,
     iXLen %3)
 
@@ -2742,12 +2742,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i16(
   <vscale x 32 x half>,
-  <vscale x 32 x half>*,
+  ptr,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -2756,7 +2756,7 @@ define void @intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x h
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i16(
     <vscale x 32 x half> %0,
-    <vscale x 32 x half>* %1,
+    ptr %1,
     <vscale x 32 x i16> %2,
     <vscale x 32 x i1> %3,
     iXLen %4)
@@ -2766,11 +2766,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i16(
   <vscale x 1 x float>,
-  <vscale x 1 x float>*,
+  ptr,
   <vscale x 1 x i16>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -2779,7 +2779,7 @@ define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0,
 entry:
   call void @llvm.riscv.vsuxei.nxv1f32.nxv1i16(
     <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
+    ptr %1,
     <vscale x 1 x i16> %2,
     iXLen %3)
 
@@ -2788,12 +2788,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i16(
   <vscale x 1 x float>,
-  <vscale x 1 x float>*,
+  ptr,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -2802,7 +2802,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i16(
     <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
+    ptr %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -2812,11 +2812,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i16(
   <vscale x 2 x float>,
-  <vscale x 2 x float>*,
+  ptr,
   <vscale x 2 x i16>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -2825,7 +2825,7 @@ define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0,
 entry:
   call void @llvm.riscv.vsuxei.nxv2f32.nxv2i16(
     <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
+    ptr %1,
     <vscale x 2 x i16> %2,
     iXLen %3)
 
@@ -2834,12 +2834,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i16(
   <vscale x 2 x float>,
-  <vscale x 2 x float>*,
+  ptr,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -2848,7 +2848,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i16(
     <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
+    ptr %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -2858,11 +2858,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i16(
   <vscale x 4 x float>,
-  <vscale x 4 x float>*,
+  ptr,
   <vscale x 4 x i16>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -2871,7 +2871,7 @@ define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0,
 entry:
   call void @llvm.riscv.vsuxei.nxv4f32.nxv4i16(
     <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
+    ptr %1,
     <vscale x 4 x i16> %2,
     iXLen %3)
 
@@ -2880,12 +2880,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i16(
   <vscale x 4 x float>,
-  <vscale x 4 x float>*,
+  ptr,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -2894,7 +2894,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i16(
     <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
+    ptr %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -2904,11 +2904,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i16(
   <vscale x 8 x float>,
-  <vscale x 8 x float>*,
+  ptr,
   <vscale x 8 x i16>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -2917,7 +2917,7 @@ define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0,
 entry:
   call void @llvm.riscv.vsuxei.nxv8f32.nxv8i16(
     <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
+    ptr %1,
     <vscale x 8 x i16> %2,
     iXLen %3)
 
@@ -2926,12 +2926,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i16(
   <vscale x 8 x float>,
-  <vscale x 8 x float>*,
+  ptr,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -2940,7 +2940,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i16(
     <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
+    ptr %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
     iXLen %4)
@@ -2950,11 +2950,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv16f32.nxv16i16(
   <vscale x 16 x float>,
-  <vscale x 16 x float>*,
+  ptr,
   <vscale x 16 x i16>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -2963,7 +2963,7 @@ define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float>
 entry:
   call void @llvm.riscv.vsuxei.nxv16f32.nxv16i16(
     <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
+    ptr %1,
     <vscale x 16 x i16> %2,
     iXLen %3)
 
@@ -2972,12 +2972,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i16(
   <vscale x 16 x float>,
-  <vscale x 16 x float>*,
+  ptr,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -2986,7 +2986,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x f
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i16(
     <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
+    ptr %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
     iXLen %4)
@@ -2996,11 +2996,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i16(
   <vscale x 1 x double>,
-  <vscale x 1 x double>*,
+  ptr,
   <vscale x 1 x i16>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -3009,7 +3009,7 @@ define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0
 entry:
   call void @llvm.riscv.vsuxei.nxv1f64.nxv1i16(
     <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
+    ptr %1,
     <vscale x 1 x i16> %2,
     iXLen %3)
 
@@ -3018,12 +3018,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i16(
   <vscale x 1 x double>,
-  <vscale x 1 x double>*,
+  ptr,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -3032,7 +3032,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x doubl
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i16(
     <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
+    ptr %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -3042,11 +3042,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i16(
   <vscale x 2 x double>,
-  <vscale x 2 x double>*,
+  ptr,
   <vscale x 2 x i16>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -3055,7 +3055,7 @@ define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0
 entry:
   call void @llvm.riscv.vsuxei.nxv2f64.nxv2i16(
     <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
+    ptr %1,
     <vscale x 2 x i16> %2,
     iXLen %3)
 
@@ -3064,12 +3064,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i16(
   <vscale x 2 x double>,
-  <vscale x 2 x double>*,
+  ptr,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -3078,7 +3078,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x doubl
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i16(
     <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
+    ptr %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -3088,11 +3088,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i16(
   <vscale x 4 x double>,
-  <vscale x 4 x double>*,
+  ptr,
   <vscale x 4 x i16>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -3101,7 +3101,7 @@ define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0
 entry:
   call void @llvm.riscv.vsuxei.nxv4f64.nxv4i16(
     <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
+    ptr %1,
     <vscale x 4 x i16> %2,
     iXLen %3)
 
@@ -3110,12 +3110,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i16(
   <vscale x 4 x double>,
-  <vscale x 4 x double>*,
+  ptr,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -3124,7 +3124,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x doubl
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i16(
     <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
+    ptr %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -3134,11 +3134,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i16(
   <vscale x 8 x double>,
-  <vscale x 8 x double>*,
+  ptr,
   <vscale x 8 x i16>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -3147,7 +3147,7 @@ define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0
 entry:
   call void @llvm.riscv.vsuxei.nxv8f64.nxv8i16(
     <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
+    ptr %1,
     <vscale x 8 x i16> %2,
     iXLen %3)
 
@@ -3156,12 +3156,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i16(
   <vscale x 8 x double>,
-  <vscale x 8 x double>*,
+  ptr,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -3170,7 +3170,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x doubl
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i16(
     <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
+    ptr %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
     iXLen %4)
@@ -3180,11 +3180,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   <vscale x 1 x i8>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -3193,7 +3193,7 @@ define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vsca
 entry:
   call void @llvm.riscv.vsuxei.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
+    ptr %1,
     <vscale x 1 x i8> %2,
     iXLen %3)
 
@@ -3202,12 +3202,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
+  ptr,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -3216,7 +3216,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0,
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
+    ptr %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -3226,11 +3226,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
+  ptr,
   <vscale x 2 x i8>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -3239,7 +3239,7 @@ define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vsca
 entry:
   call void @llvm.riscv.vsuxei.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
+    ptr %1,
     <vscale x 2 x i8> %2,
     iXLen %3)
 
@@ -3248,12 +3248,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
+  ptr,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -3262,7 +3262,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0,
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
+    ptr %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -3272,11 +3272,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
+  ptr,
   <vscale x 4 x i8>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -3285,7 +3285,7 @@ define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vsca
 entry:
   call void @llvm.riscv.vsuxei.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
+    ptr %1,
     <vscale x 4 x i8> %2,
     iXLen %3)
 
@@ -3294,12 +3294,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
+  ptr,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -3308,7 +3308,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0,
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
+    ptr %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -3318,11 +3318,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
+  ptr,
   <vscale x 8 x i8>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -3331,7 +3331,7 @@ define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vsca
 entry:
   call void @llvm.riscv.vsuxei.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
+    ptr %1,
     <vscale x 8 x i8> %2,
     iXLen %3)
 
@@ -3340,12 +3340,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
+  ptr,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -3354,7 +3354,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0,
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
+    ptr %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
     iXLen %4)
@@ -3364,11 +3364,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
+  ptr,
   <vscale x 16 x i8>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -3377,7 +3377,7 @@ define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <
 entry:
   call void @llvm.riscv.vsuxei.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
+    ptr %1,
     <vscale x 16 x i8> %2,
     iXLen %3)
 
@@ -3386,12 +3386,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
+  ptr,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -3400,7 +3400,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
+    ptr %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
     iXLen %4)
@@ -3410,11 +3410,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
+  ptr,
   <vscale x 32 x i8>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -3423,7 +3423,7 @@ define void @intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <
 entry:
   call void @llvm.riscv.vsuxei.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
-    <vscale x 32 x i8>* %1,
+    ptr %1,
     <vscale x 32 x i8> %2,
     iXLen %3)
 
@@ -3432,12 +3432,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
+  ptr,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -3446,7 +3446,7 @@ define void @intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
-    <vscale x 32 x i8>* %1,
+    ptr %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
     iXLen %4)
@@ -3456,11 +3456,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
-  <vscale x 64 x i8>*,
+  ptr,
   <vscale x 64 x i8>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, <vscale x 64 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
@@ -3469,7 +3469,7 @@ define void @intrinsic_vsuxei_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <
 entry:
   call void @llvm.riscv.vsuxei.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
-    <vscale x 64 x i8>* %1,
+    ptr %1,
     <vscale x 64 x i8> %2,
     iXLen %3)
 
@@ -3478,12 +3478,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
-  <vscale x 64 x i8>*,
+  ptr,
   <vscale x 64 x i8>,
   <vscale x 64 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
@@ -3492,7 +3492,7 @@ define void @intrinsic_vsuxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
-    <vscale x 64 x i8>* %1,
+    ptr %1,
     <vscale x 64 x i8> %2,
     <vscale x 64 x i1> %3,
     iXLen %4)
@@ -3502,11 +3502,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
+  ptr,
   <vscale x 1 x i8>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -3515,7 +3515,7 @@ define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <v
 entry:
   call void @llvm.riscv.vsuxei.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
+    ptr %1,
     <vscale x 1 x i8> %2,
     iXLen %3)
 
@@ -3524,12 +3524,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
+  ptr,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -3538,7 +3538,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
+    ptr %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -3548,11 +3548,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i8(
   <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
+  ptr,
   <vscale x 2 x i8>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -3561,7 +3561,7 @@ define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <v
 entry:
   call void @llvm.riscv.vsuxei.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
+    ptr %1,
     <vscale x 2 x i8> %2,
     iXLen %3)
 
@@ -3570,12 +3570,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i8(
   <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
+  ptr,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -3584,7 +3584,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
+    ptr %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -3594,11 +3594,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i8(
   <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
+  ptr,
   <vscale x 4 x i8>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -3607,7 +3607,7 @@ define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <v
 entry:
   call void @llvm.riscv.vsuxei.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
+    ptr %1,
     <vscale x 4 x i8> %2,
     iXLen %3)
 
@@ -3616,12 +3616,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i8(
   <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
+  ptr,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -3630,7 +3630,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
+    ptr %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -3640,11 +3640,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i8(
   <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
+  ptr,
   <vscale x 8 x i8>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -3653,7 +3653,7 @@ define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <v
 entry:
   call void @llvm.riscv.vsuxei.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
+    ptr %1,
     <vscale x 8 x i8> %2,
     iXLen %3)
 
@@ -3662,12 +3662,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i8(
   <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
+  ptr,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -3676,7 +3676,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
+    ptr %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
     iXLen %4)
@@ -3686,11 +3686,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv16i16.nxv16i8(
   <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
+  ptr,
   <vscale x 16 x i8>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -3699,7 +3699,7 @@ define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0
 entry:
   call void @llvm.riscv.vsuxei.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
+    ptr %1,
     <vscale x 16 x i8> %2,
     iXLen %3)
 
@@ -3708,12 +3708,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i8(
   <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
+  ptr,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -3722,7 +3722,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i1
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
+    ptr %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
     iXLen %4)
@@ -3732,11 +3732,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv32i16.nxv32i8(
   <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
+  ptr,
   <vscale x 32 x i8>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -3745,7 +3745,7 @@ define void @intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0
 entry:
   call void @llvm.riscv.vsuxei.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
-    <vscale x 32 x i16>* %1,
+    ptr %1,
     <vscale x 32 x i8> %2,
     iXLen %3)
 
@@ -3754,12 +3754,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i8(
   <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
+  ptr,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -3768,7 +3768,7 @@ define void @intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i1
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
-    <vscale x 32 x i16>* %1,
+    ptr %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
     iXLen %4)
@@ -3778,11 +3778,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i8(
   <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
+  ptr,
   <vscale x 1 x i8>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -3791,7 +3791,7 @@ define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <v
 entry:
   call void @llvm.riscv.vsuxei.nxv1i32.nxv1i8(
     <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
+    ptr %1,
     <vscale x 1 x i8> %2,
     iXLen %3)
 
@@ -3800,12 +3800,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i8(
   <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
+  ptr,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -3814,7 +3814,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
+    ptr %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -3824,11 +3824,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i8(
   <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
+  ptr,
   <vscale x 2 x i8>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -3837,7 +3837,7 @@ define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <v
 entry:
   call void @llvm.riscv.vsuxei.nxv2i32.nxv2i8(
     <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
+    ptr %1,
     <vscale x 2 x i8> %2,
     iXLen %3)
 
@@ -3846,12 +3846,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i8(
   <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
+  ptr,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -3860,7 +3860,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
+    ptr %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -3870,11 +3870,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i8(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   <vscale x 4 x i8>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -3883,7 +3883,7 @@ define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <v
 entry:
   call void @llvm.riscv.vsuxei.nxv4i32.nxv4i8(
     <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
+    ptr %1,
     <vscale x 4 x i8> %2,
     iXLen %3)
 
@@ -3892,12 +3892,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i8(
   <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
+  ptr,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -3906,7 +3906,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
+    ptr %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -3916,11 +3916,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i8(
   <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
+  ptr,
   <vscale x 8 x i8>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -3929,7 +3929,7 @@ define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <v
 entry:
   call void @llvm.riscv.vsuxei.nxv8i32.nxv8i8(
     <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
+    ptr %1,
     <vscale x 8 x i8> %2,
     iXLen %3)
 
@@ -3938,12 +3938,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i8(
   <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
+  ptr,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -3952,7 +3952,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
+    ptr %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
     iXLen %4)
@@ -3962,11 +3962,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv16i32.nxv16i8(
   <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
+  ptr,
   <vscale x 16 x i8>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -3975,7 +3975,7 @@ define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0
 entry:
   call void @llvm.riscv.vsuxei.nxv16i32.nxv16i8(
     <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
+    ptr %1,
     <vscale x 16 x i8> %2,
     iXLen %3)
 
@@ -3984,12 +3984,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i8(
   <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
+  ptr,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -3998,7 +3998,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i3
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
+    ptr %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
     iXLen %4)
@@ -4008,11 +4008,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i8(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   <vscale x 1 x i8>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -4021,7 +4021,7 @@ define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <v
 entry:
   call void @llvm.riscv.vsuxei.nxv1i64.nxv1i8(
     <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
+    ptr %1,
     <vscale x 1 x i8> %2,
     iXLen %3)
 
@@ -4030,12 +4030,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i8(
   <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
+  ptr,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -4044,7 +4044,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
+    ptr %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -4054,11 +4054,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i8(
   <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
+  ptr,
   <vscale x 2 x i8>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -4067,7 +4067,7 @@ define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <v
 entry:
   call void @llvm.riscv.vsuxei.nxv2i64.nxv2i8(
     <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
+    ptr %1,
     <vscale x 2 x i8> %2,
     iXLen %3)
 
@@ -4076,12 +4076,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i8(
   <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
+  ptr,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -4090,7 +4090,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
+    ptr %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -4100,11 +4100,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i8(
   <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
+  ptr,
   <vscale x 4 x i8>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -4113,7 +4113,7 @@ define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <v
 entry:
   call void @llvm.riscv.vsuxei.nxv4i64.nxv4i8(
     <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
+    ptr %1,
     <vscale x 4 x i8> %2,
     iXLen %3)
 
@@ -4122,12 +4122,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i8(
   <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
+  ptr,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -4136,7 +4136,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
+    ptr %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -4146,11 +4146,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i8(
   <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
+  ptr,
   <vscale x 8 x i8>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -4159,7 +4159,7 @@ define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <v
 entry:
   call void @llvm.riscv.vsuxei.nxv8i64.nxv8i8(
     <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
+    ptr %1,
     <vscale x 8 x i8> %2,
     iXLen %3)
 
@@ -4168,12 +4168,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i8(
   <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
+  ptr,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -4182,7 +4182,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
+    ptr %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
     iXLen %4)
@@ -4192,11 +4192,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i8(
   <vscale x 1 x half>,
-  <vscale x 1 x half>*,
+  ptr,
   <vscale x 1 x i8>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -4205,7 +4205,7 @@ define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <
 entry:
   call void @llvm.riscv.vsuxei.nxv1f16.nxv1i8(
     <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
+    ptr %1,
     <vscale x 1 x i8> %2,
     iXLen %3)
 
@@ -4214,12 +4214,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i8(
   <vscale x 1 x half>,
-  <vscale x 1 x half>*,
+  ptr,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -4228,7 +4228,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i8(
     <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
+    ptr %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -4238,11 +4238,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i8(
   <vscale x 2 x half>,
-  <vscale x 2 x half>*,
+  ptr,
   <vscale x 2 x i8>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -4251,7 +4251,7 @@ define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <
 entry:
   call void @llvm.riscv.vsuxei.nxv2f16.nxv2i8(
     <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
+    ptr %1,
     <vscale x 2 x i8> %2,
     iXLen %3)
 
@@ -4260,12 +4260,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i8(
   <vscale x 2 x half>,
-  <vscale x 2 x half>*,
+  ptr,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -4274,7 +4274,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i8(
     <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
+    ptr %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -4284,11 +4284,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i8(
   <vscale x 4 x half>,
-  <vscale x 4 x half>*,
+  ptr,
   <vscale x 4 x i8>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -4297,7 +4297,7 @@ define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <
 entry:
   call void @llvm.riscv.vsuxei.nxv4f16.nxv4i8(
     <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
+    ptr %1,
     <vscale x 4 x i8> %2,
     iXLen %3)
 
@@ -4306,12 +4306,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i8(
   <vscale x 4 x half>,
-  <vscale x 4 x half>*,
+  ptr,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -4320,7 +4320,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i8(
     <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
+    ptr %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -4330,11 +4330,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i8(
   <vscale x 8 x half>,
-  <vscale x 8 x half>*,
+  ptr,
   <vscale x 8 x i8>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -4343,7 +4343,7 @@ define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <
 entry:
   call void @llvm.riscv.vsuxei.nxv8f16.nxv8i8(
     <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
+    ptr %1,
     <vscale x 8 x i8> %2,
     iXLen %3)
 
@@ -4352,12 +4352,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i8(
   <vscale x 8 x half>,
-  <vscale x 8 x half>*,
+  ptr,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -4366,7 +4366,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i8(
     <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
+    ptr %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
     iXLen %4)
@@ -4376,11 +4376,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv16f16.nxv16i8(
   <vscale x 16 x half>,
-  <vscale x 16 x half>*,
+  ptr,
   <vscale x 16 x i8>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -4389,7 +4389,7 @@ define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %
 entry:
   call void @llvm.riscv.vsuxei.nxv16f16.nxv16i8(
     <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
+    ptr %1,
     <vscale x 16 x i8> %2,
     iXLen %3)
 
@@ -4398,12 +4398,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i8(
   <vscale x 16 x half>,
-  <vscale x 16 x half>*,
+  ptr,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -4412,7 +4412,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x ha
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i8(
     <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
+    ptr %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
     iXLen %4)
@@ -4422,11 +4422,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv32f16.nxv32i8(
   <vscale x 32 x half>,
-  <vscale x 32 x half>*,
+  ptr,
   <vscale x 32 x i8>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -4435,7 +4435,7 @@ define void @intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %
 entry:
   call void @llvm.riscv.vsuxei.nxv32f16.nxv32i8(
     <vscale x 32 x half> %0,
-    <vscale x 32 x half>* %1,
+    ptr %1,
     <vscale x 32 x i8> %2,
     iXLen %3)
 
@@ -4444,12 +4444,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i8(
   <vscale x 32 x half>,
-  <vscale x 32 x half>*,
+  ptr,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -4458,7 +4458,7 @@ define void @intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x ha
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i8(
     <vscale x 32 x half> %0,
-    <vscale x 32 x half>* %1,
+    ptr %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
     iXLen %4)
@@ -4468,11 +4468,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i8(
   <vscale x 1 x float>,
-  <vscale x 1 x float>*,
+  ptr,
   <vscale x 1 x i8>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -4481,7 +4481,7 @@ define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0,
 entry:
   call void @llvm.riscv.vsuxei.nxv1f32.nxv1i8(
     <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
+    ptr %1,
     <vscale x 1 x i8> %2,
     iXLen %3)
 
@@ -4490,12 +4490,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i8(
   <vscale x 1 x float>,
-  <vscale x 1 x float>*,
+  ptr,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -4504,7 +4504,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i8(
     <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
+    ptr %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -4514,11 +4514,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i8(
   <vscale x 2 x float>,
-  <vscale x 2 x float>*,
+  ptr,
   <vscale x 2 x i8>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -4527,7 +4527,7 @@ define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0,
 entry:
   call void @llvm.riscv.vsuxei.nxv2f32.nxv2i8(
     <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
+    ptr %1,
     <vscale x 2 x i8> %2,
     iXLen %3)
 
@@ -4536,12 +4536,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i8(
   <vscale x 2 x float>,
-  <vscale x 2 x float>*,
+  ptr,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -4550,7 +4550,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i8(
     <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
+    ptr %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -4560,11 +4560,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i8(
   <vscale x 4 x float>,
-  <vscale x 4 x float>*,
+  ptr,
   <vscale x 4 x i8>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -4573,7 +4573,7 @@ define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0,
 entry:
   call void @llvm.riscv.vsuxei.nxv4f32.nxv4i8(
     <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
+    ptr %1,
     <vscale x 4 x i8> %2,
     iXLen %3)
 
@@ -4582,12 +4582,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i8(
   <vscale x 4 x float>,
-  <vscale x 4 x float>*,
+  ptr,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -4596,7 +4596,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i8(
     <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
+    ptr %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -4606,11 +4606,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i8(
   <vscale x 8 x float>,
-  <vscale x 8 x float>*,
+  ptr,
   <vscale x 8 x i8>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -4619,7 +4619,7 @@ define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0,
 entry:
   call void @llvm.riscv.vsuxei.nxv8f32.nxv8i8(
     <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
+    ptr %1,
     <vscale x 8 x i8> %2,
     iXLen %3)
 
@@ -4628,12 +4628,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i8(
   <vscale x 8 x float>,
-  <vscale x 8 x float>*,
+  ptr,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -4642,7 +4642,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float>
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i8(
     <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
+    ptr %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
     iXLen %4)
@@ -4652,11 +4652,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv16f32.nxv16i8(
   <vscale x 16 x float>,
-  <vscale x 16 x float>*,
+  ptr,
   <vscale x 16 x i8>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -4665,7 +4665,7 @@ define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float>
 entry:
   call void @llvm.riscv.vsuxei.nxv16f32.nxv16i8(
     <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
+    ptr %1,
     <vscale x 16 x i8> %2,
     iXLen %3)
 
@@ -4674,12 +4674,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i8(
   <vscale x 16 x float>,
-  <vscale x 16 x float>*,
+  ptr,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -4688,7 +4688,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x fl
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i8(
     <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
+    ptr %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
     iXLen %4)
@@ -4698,11 +4698,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i8(
   <vscale x 1 x double>,
-  <vscale x 1 x double>*,
+  ptr,
   <vscale x 1 x i8>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -4711,7 +4711,7 @@ define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0,
 entry:
   call void @llvm.riscv.vsuxei.nxv1f64.nxv1i8(
     <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
+    ptr %1,
     <vscale x 1 x i8> %2,
     iXLen %3)
 
@@ -4720,12 +4720,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i8(
   <vscale x 1 x double>,
-  <vscale x 1 x double>*,
+  ptr,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -4734,7 +4734,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i8(
     <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
+    ptr %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
     iXLen %4)
@@ -4744,11 +4744,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i8(
   <vscale x 2 x double>,
-  <vscale x 2 x double>*,
+  ptr,
   <vscale x 2 x i8>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -4757,7 +4757,7 @@ define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0,
 entry:
   call void @llvm.riscv.vsuxei.nxv2f64.nxv2i8(
     <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
+    ptr %1,
     <vscale x 2 x i8> %2,
     iXLen %3)
 
@@ -4766,12 +4766,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i8(
   <vscale x 2 x double>,
-  <vscale x 2 x double>*,
+  ptr,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -4780,7 +4780,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i8(
     <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
+    ptr %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
     iXLen %4)
@@ -4790,11 +4790,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i8(
   <vscale x 4 x double>,
-  <vscale x 4 x double>*,
+  ptr,
   <vscale x 4 x i8>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -4803,7 +4803,7 @@ define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0,
 entry:
   call void @llvm.riscv.vsuxei.nxv4f64.nxv4i8(
     <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
+    ptr %1,
     <vscale x 4 x i8> %2,
     iXLen %3)
 
@@ -4812,12 +4812,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i8(
   <vscale x 4 x double>,
-  <vscale x 4 x double>*,
+  ptr,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -4826,7 +4826,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i8(
     <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
+    ptr %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
     iXLen %4)
@@ -4836,11 +4836,11 @@ entry:
 
 declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i8(
   <vscale x 8 x double>,
-  <vscale x 8 x double>*,
+  ptr,
   <vscale x 8 x i8>,
   iXLen);
 
-define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -4849,7 +4849,7 @@ define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0,
 entry:
   call void @llvm.riscv.vsuxei.nxv8f64.nxv8i8(
     <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
+    ptr %1,
     <vscale x 8 x i8> %2,
     iXLen %3)
 
@@ -4858,12 +4858,12 @@ entry:
 
 declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i8(
   <vscale x 8 x double>,
-  <vscale x 8 x double>*,
+  ptr,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
   iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -4872,7 +4872,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i8(
     <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
+    ptr %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
     iXLen %4)

diff --git a/llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll
index 215b1ddd5de35..10175218a4409 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll
@@ -385,7 +385,7 @@ mergeblock:
 }
 
 ; Test loop with no dominating vxrm write.
-define void @test10(i8* nocapture %ptr_dest, i8* nocapture readonly %ptr_op1, i8* nocapture readonly %ptr_op2, iXLen %n) {
+define void @test10(ptr nocapture %ptr_dest, ptr nocapture readonly %ptr_op1, ptr nocapture readonly %ptr_op2, iXLen %n) {
 ; CHECK-LABEL: test10:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    beqz a3, .LBB9_3
@@ -422,11 +422,11 @@ for.end:
 }
 
 declare iXLen @llvm.riscv.vsetvli.iXLen(iXLen, iXLen immarg, iXLen immarg)
-declare <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8.iXLen(<vscale x 1 x i8>, <vscale x 1 x i8>* nocapture, iXLen)
-declare void @llvm.riscv.vse.nxv1i8.iXLen(<vscale x 1 x i8>, <vscale x 1 x i8>* nocapture, iXLen)
+declare <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8.iXLen(<vscale x 1 x i8>, ptr nocapture, iXLen)
+declare void @llvm.riscv.vse.nxv1i8.iXLen(<vscale x 1 x i8>, ptr nocapture, iXLen)
 
 ; Test loop with dominating vxrm write. Make sure there is no write in the loop.
-define void @test11(i8* nocapture %ptr_dest, i8* nocapture readonly %ptr_op1, i8* nocapture readonly %ptr_op2, iXLen %n) {
+define void @test11(ptr nocapture %ptr_dest, ptr nocapture readonly %ptr_op1, ptr nocapture readonly %ptr_op2, iXLen %n) {
 ; CHECK-LABEL: test11:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a4, a3, e8, mf8, ta, ma

diff --git a/llvm/test/CodeGen/RISCV/rvv/wrong-stack-offset-for-rvv-object.mir b/llvm/test/CodeGen/RISCV/rvv/wrong-stack-offset-for-rvv-object.mir
index e629727d26c35..06ed46f291a83 100644
--- a/llvm/test/CodeGen/RISCV/rvv/wrong-stack-offset-for-rvv-object.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/wrong-stack-offset-for-rvv-object.mir
@@ -32,27 +32,27 @@
 
   %struct = type { i32 }
 
-  define void @asm_fprintf(%struct %file, i8* %p, [10 x i8]* %buf, i8* %arrayidx3, <2 x i8>* %0, i8* %1, ...) #0 {
+  define void @asm_fprintf(%struct %file, ptr %p, ptr %buf, ptr %arrayidx3, ptr %0, ptr %1, ...) #0 {
   entry:
     %buf1 = alloca [10 x i8], i32 0, align 8
-    %arrayidx32 = getelementptr inbounds [10 x i8], [10 x i8]* %buf, i64 0, i64 1
+    %arrayidx32 = getelementptr inbounds [10 x i8], ptr %buf, i64 0, i64 1
     br label %while.cond
 
   while.cond:                                       ; preds = %while.cond, %sw.bb, %entry
-    %incdec.ptr = getelementptr inbounds i8, i8* undef, i64 1
-    %2 = load i8, i8* null, align 1
+    %incdec.ptr = getelementptr inbounds i8, ptr undef, i64 1
+    %2 = load i8, ptr null, align 1
     %3 = zext i8 0 to i64
     %cond = icmp eq i64 %3, 0
     br i1 %cond, label %sw.bb, label %while.cond
 
   sw.bb:                                            ; preds = %while.cond
-    %4 = load i8, i8* null, align 1
-    store <2 x i8> zeroinitializer, <2 x i8>* %0, align 1
-    %call = call i32 (i8*, ...) @fprintf(i8* %p)
+    %4 = load i8, ptr null, align 1
+    store <2 x i8> zeroinitializer, ptr %0, align 1
+    %call = call i32 (ptr, ...) @fprintf(ptr %p)
     br label %while.cond
   }
 
-  declare i32 @fprintf(i8*, ...) #0
+  declare i32 @fprintf(ptr, ...) #0
 
   attributes #0 = { "target-features"="+m,+v" }
 

diff --git a/llvm/test/CodeGen/RISCV/rvv/zve32-types.ll b/llvm/test/CodeGen/RISCV/rvv/zve32-types.ll
index f632d01cf183b..6fcd4dc237853 100644
--- a/llvm/test/CodeGen/RISCV/rvv/zve32-types.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/zve32-types.ll
@@ -10,7 +10,7 @@
 ; widened by using the next larger LMUL and operating on the whole vector. This
 ; isn't optimal, but doesn't crash.
 
-define void @vadd_vv_nxv1i8(<vscale x 1 x i8>* %pa, <vscale x 1 x i8>* %pb) {
+define void @vadd_vv_nxv1i8(ptr %pa, ptr %pb) {
 ; CHECK-LABEL: vadd_vv_nxv1i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a2, vlenb
@@ -23,14 +23,14 @@ define void @vadd_vv_nxv1i8(<vscale x 1 x i8>* %pa, <vscale x 1 x i8>* %pb) {
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %va = load <vscale x 1 x i8>, <vscale x 1 x i8>* %pa
-  %vb = load <vscale x 1 x i8>, <vscale x 1 x i8>* %pb
+  %va = load <vscale x 1 x i8>, ptr %pa
+  %vb = load <vscale x 1 x i8>, ptr %pb
   %vc = add <vscale x 1 x i8> %va, %vb
-  store <vscale x 1 x i8> %vc, <vscale x 1 x i8>* %pa
+  store <vscale x 1 x i8> %vc, ptr %pa
   ret void
 }
 
-define void @vadd_vv_nxv1i16(<vscale x 1 x i16>* %pa, <vscale x 1 x i16>* %pb) {
+define void @vadd_vv_nxv1i16(ptr %pa, ptr %pb) {
 ; CHECK-LABEL: vadd_vv_nxv1i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a2, vlenb
@@ -43,14 +43,14 @@ define void @vadd_vv_nxv1i16(<vscale x 1 x i16>* %pa, <vscale x 1 x i16>* %pb) {
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %va = load <vscale x 1 x i16>, <vscale x 1 x i16>* %pa
-  %vb = load <vscale x 1 x i16>, <vscale x 1 x i16>* %pb
+  %va = load <vscale x 1 x i16>, ptr %pa
+  %vb = load <vscale x 1 x i16>, ptr %pb
   %vc = add <vscale x 1 x i16> %va, %vb
-  store <vscale x 1 x i16> %vc, <vscale x 1 x i16>* %pa
+  store <vscale x 1 x i16> %vc, ptr %pa
   ret void
 }
 
-define void @vadd_vv_nxv1i32(<vscale x 1 x i32>* %pa, <vscale x 1 x i32>* %pb) {
+define void @vadd_vv_nxv1i32(ptr %pa, ptr %pb) {
 ; CHECK-LABEL: vadd_vv_nxv1i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a2, vlenb
@@ -63,14 +63,14 @@ define void @vadd_vv_nxv1i32(<vscale x 1 x i32>* %pa, <vscale x 1 x i32>* %pb) {
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %va = load <vscale x 1 x i32>, <vscale x 1 x i32>* %pa
-  %vb = load <vscale x 1 x i32>, <vscale x 1 x i32>* %pb
+  %va = load <vscale x 1 x i32>, ptr %pa
+  %vb = load <vscale x 1 x i32>, ptr %pb
   %vc = add <vscale x 1 x i32> %va, %vb
-  store <vscale x 1 x i32> %vc, <vscale x 1 x i32>* %pa
+  store <vscale x 1 x i32> %vc, ptr %pa
   ret void
 }
 
-define void @vfadd_vv_nxv1f32(<vscale x 1 x float>* %pa, <vscale x 1 x float>* %pb) {
+define void @vfadd_vv_nxv1f32(ptr %pa, ptr %pb) {
 ; CHECK-LABEL: vfadd_vv_nxv1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a2, vlenb
@@ -83,9 +83,9 @@ define void @vfadd_vv_nxv1f32(<vscale x 1 x float>* %pa, <vscale x 1 x float>* %
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %va = load <vscale x 1 x float>, <vscale x 1 x float>* %pa
-  %vb = load <vscale x 1 x float>, <vscale x 1 x float>* %pb
+  %va = load <vscale x 1 x float>, ptr %pa
+  %vb = load <vscale x 1 x float>, ptr %pb
   %vc = fadd <vscale x 1 x float> %va, %vb
-  store <vscale x 1 x float> %vc, <vscale x 1 x float>* %pa
+  store <vscale x 1 x float> %vc, ptr %pa
   ret void
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir
index 2971b9ee59e74..d0b76e7e4535b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir
@@ -5,7 +5,7 @@
   target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n64-S128"
   target triple = "riscv64"
 
-  define void @zvlsseg_spill(i64 *%base, i64 %vl) {
+  define void @zvlsseg_spill(ptr %base, i64 %vl) {
     ret void
   }
 ...

diff --git a/llvm/test/CodeGen/RISCV/stack-realignment.ll b/llvm/test/CodeGen/RISCV/stack-realignment.ll
index 4feb91cace4ab..034ebadc76af2 100644
--- a/llvm/test/CodeGen/RISCV/stack-realignment.ll
+++ b/llvm/test/CodeGen/RISCV/stack-realignment.ll
@@ -73,7 +73,7 @@ define void @caller16() {
 ; RV64I-LP64E-NEXT:    addi sp, sp, 32
 ; RV64I-LP64E-NEXT:    ret
   %1 = alloca i8, align 16
-  call void @callee(i8* %1)
+  call void @callee(ptr %1)
   ret void
 }
 
@@ -126,7 +126,7 @@ define void @caller_no_realign16() "no-realign-stack" {
 ; RV64I-LP64E-NEXT:    addi sp, sp, 16
 ; RV64I-LP64E-NEXT:    ret
   %1 = alloca i8, align 16
-  call void @callee(i8* %1)
+  call void @callee(ptr %1)
   ret void
 }
 

diff --git a/llvm/test/CodeGen/RISCV/vararg-ilp32e.ll b/llvm/test/CodeGen/RISCV/vararg-ilp32e.ll
index c45eb3738e6e2..18bb4f5ad0f0c 100644
--- a/llvm/test/CodeGen/RISCV/vararg-ilp32e.ll
+++ b/llvm/test/CodeGen/RISCV/vararg-ilp32e.ll
@@ -4,8 +4,8 @@
 ; RUN: llc -mtriple=riscv32 -target-abi ilp32e -frame-pointer=all -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=ILP32E-WITHFP %s
 
-declare void @llvm.va_start(i8*)
-declare void @llvm.va_end(i8*)
+declare void @llvm.va_start(ptr)
+declare void @llvm.va_end(ptr)
 declare void @abort()
 
 define i32 @caller(i32 %a) {
@@ -122,18 +122,18 @@ define void @va_double(i32 %n, ...) {
 ; ILP32E-WITHFP-NEXT:  .LBB1_2: # %if.then
 ; ILP32E-WITHFP-NEXT:    call abort
 entry:
-  %args = alloca i8*, align 4
-  %args1 = bitcast i8** %args to i8*
-  call void @llvm.va_start(i8* %args1)
-  %argp.cur = load i8*, i8** %args, align 4
-  %0 = ptrtoint i8* %argp.cur to i32
+  %args = alloca ptr, align 4
+  %args1 = bitcast ptr %args to ptr
+  call void @llvm.va_start(ptr %args1)
+  %argp.cur = load ptr, ptr %args, align 4
+  %0 = ptrtoint ptr %argp.cur to i32
   %1 = add i32 %0, 7
   %2 = and i32 %1, -8
-  %argp.cur.aligned = inttoptr i32 %2 to i8*
-  %argp.next = getelementptr inbounds i8, i8* %argp.cur.aligned, i32 8
-  store i8* %argp.next, i8** %args, align 4
-  %3 = bitcast i8* %argp.cur.aligned to double*
-  %4 = load double, double* %3, align 8
+  %argp.cur.aligned = inttoptr i32 %2 to ptr
+  %argp.next = getelementptr inbounds i8, ptr %argp.cur.aligned, i32 8
+  store ptr %argp.next, ptr %args, align 4
+  %3 = bitcast ptr %argp.cur.aligned to ptr
+  %4 = load double, ptr %3, align 8
   %cmp = fcmp une double %4, 2.000000e+00
   br i1 %cmp, label %if.then, label %if.end
 
@@ -142,7 +142,7 @@ if.then:
   unreachable
 
 if.end:
-  %args2 = bitcast i8** %args to i8*
-  call void @llvm.va_end(i8* %args2)
+  %args2 = bitcast ptr %args to ptr
+  call void @llvm.va_end(ptr %args2)
   ret void
 }

diff --git a/llvm/test/CodeGen/RISCV/xtheadfmemidx.ll b/llvm/test/CodeGen/RISCV/xtheadfmemidx.ll
index 0b908c73bdd31..cdaae23dbd53e 100644
--- a/llvm/test/CodeGen/RISCV/xtheadfmemidx.ll
+++ b/llvm/test/CodeGen/RISCV/xtheadfmemidx.ll
@@ -4,7 +4,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+d -mattr=+xtheadfmemidx -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV64XTHEADFMEMIDX
 
-define float @flrw(float* %a, i64 %b) {
+define float @flrw(ptr %a, i64 %b) {
 ; RV32XTHEADMEMIDX-LABEL: flrw:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    th.flrw fa5, a0, a1, 2
@@ -16,13 +16,13 @@ define float @flrw(float* %a, i64 %b) {
 ; RV64XTHEADFMEMIDX-NEXT:    th.flrw fa5, a0, a1, 2
 ; RV64XTHEADFMEMIDX-NEXT:    fadd.s fa0, fa5, fa5
 ; RV64XTHEADFMEMIDX-NEXT:    ret
-  %1 = getelementptr float, float* %a, i64 %b
-  %2 = load float, float* %1, align 4
+  %1 = getelementptr float, ptr %a, i64 %b
+  %2 = load float, ptr %1, align 4
   %3 = fadd float %2, %2
   ret float %3
 }
 
-define float @flurw(float* %a, i32 %b) {
+define float @flurw(ptr %a, i32 %b) {
 ; RV32XTHEADMEMIDX-LABEL: flurw:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    th.flrw fa5, a0, a1, 2
@@ -35,13 +35,13 @@ define float @flurw(float* %a, i32 %b) {
 ; RV64XTHEADFMEMIDX-NEXT:    fadd.s fa0, fa5, fa5
 ; RV64XTHEADFMEMIDX-NEXT:    ret
   %1 = zext i32 %b to i64
-  %2 = getelementptr float, float* %a, i64 %1
-  %3 = load float, float* %2, align 4
+  %2 = getelementptr float, ptr %a, i64 %1
+  %3 = load float, ptr %2, align 4
   %4 = fadd float %3, %3
   ret float %4
 }
 
-define void @fsrw(float* %a, i64 %b, float %c) {
+define void @fsrw(ptr %a, i64 %b, float %c) {
 ; RV32XTHEADMEMIDX-LABEL: fsrw:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    fadd.s fa5, fa0, fa0
@@ -54,12 +54,12 @@ define void @fsrw(float* %a, i64 %b, float %c) {
 ; RV64XTHEADFMEMIDX-NEXT:    th.fsrw fa5, a0, a1, 2
 ; RV64XTHEADFMEMIDX-NEXT:    ret
   %1 = fadd float %c, %c
-  %2 = getelementptr float, float* %a, i64 %b
-  store float %1, float* %2, align 4
+  %2 = getelementptr float, ptr %a, i64 %b
+  store float %1, ptr %2, align 4
   ret void
 }
 
-define void @fsurw(float* %a, i32 %b, float %c) {
+define void @fsurw(ptr %a, i32 %b, float %c) {
 ; RV32XTHEADMEMIDX-LABEL: fsurw:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    fadd.s fa5, fa0, fa0
@@ -73,12 +73,12 @@ define void @fsurw(float* %a, i32 %b, float %c) {
 ; RV64XTHEADFMEMIDX-NEXT:    ret
   %1 = zext i32 %b to i64
   %2 = fadd float %c, %c
-  %3 = getelementptr float, float* %a, i64 %1
-  store float %2, float* %3, align 4
+  %3 = getelementptr float, ptr %a, i64 %1
+  store float %2, ptr %3, align 4
   ret void
 }
 
-define double @flrd(double* %a, i64 %b) {
+define double @flrd(ptr %a, i64 %b) {
 ; RV32XTHEADMEMIDX-LABEL: flrd:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    th.flrd fa5, a0, a1, 3
@@ -90,13 +90,13 @@ define double @flrd(double* %a, i64 %b) {
 ; RV64XTHEADFMEMIDX-NEXT:    th.flrd fa5, a0, a1, 3
 ; RV64XTHEADFMEMIDX-NEXT:    fadd.d fa0, fa5, fa5
 ; RV64XTHEADFMEMIDX-NEXT:    ret
-  %1 = getelementptr double, double* %a, i64 %b
-  %2 = load double, double* %1, align 8
+  %1 = getelementptr double, ptr %a, i64 %b
+  %2 = load double, ptr %1, align 8
   %3 = fadd double %2, %2
   ret double %3
 }
 
-define double @flurd(double* %a, i32 %b) {
+define double @flurd(ptr %a, i32 %b) {
 ; RV32XTHEADMEMIDX-LABEL: flurd:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    th.flrd fa5, a0, a1, 3
@@ -109,13 +109,13 @@ define double @flurd(double* %a, i32 %b) {
 ; RV64XTHEADFMEMIDX-NEXT:    fadd.d fa0, fa5, fa5
 ; RV64XTHEADFMEMIDX-NEXT:    ret
   %1 = zext i32 %b to i64
-  %2 = getelementptr double, double* %a, i64 %1
-  %3 = load double, double* %2, align 8
+  %2 = getelementptr double, ptr %a, i64 %1
+  %3 = load double, ptr %2, align 8
   %4 = fadd double %3, %3
   ret double %4
 }
 
-define void @fsrd(double* %a, i64 %b, double %c) {
+define void @fsrd(ptr %a, i64 %b, double %c) {
 ; RV32XTHEADMEMIDX-LABEL: fsrd:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    fadd.d fa5, fa0, fa0
@@ -128,12 +128,12 @@ define void @fsrd(double* %a, i64 %b, double %c) {
 ; RV64XTHEADFMEMIDX-NEXT:    th.fsrd fa5, a0, a1, 3
 ; RV64XTHEADFMEMIDX-NEXT:    ret
   %1 = fadd double %c, %c
-  %2 = getelementptr double, double* %a, i64 %b
-  store double %1, double* %2, align 8
+  %2 = getelementptr double, ptr %a, i64 %b
+  store double %1, ptr %2, align 8
   ret void
 }
 
-define void @fsurd(double* %a, i32 %b, double %c) {
+define void @fsurd(ptr %a, i32 %b, double %c) {
 ; RV32XTHEADMEMIDX-LABEL: fsurd:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    fadd.d fa5, fa0, fa0
@@ -147,7 +147,7 @@ define void @fsurd(double* %a, i32 %b, double %c) {
 ; RV64XTHEADFMEMIDX-NEXT:    ret
   %1 = zext i32 %b to i64
   %2 = fadd double %c, %c
-  %3 = getelementptr double, double* %a, i64 %1
-  store double %2, double* %3, align 8
+  %3 = getelementptr double, ptr %a, i64 %1
+  store double %2, ptr %3, align 8
   ret void
 }

diff --git a/llvm/test/CodeGen/RISCV/xtheadmemidx.ll b/llvm/test/CodeGen/RISCV/xtheadmemidx.ll
index f6f01236eacee..46aa383866e93 100644
--- a/llvm/test/CodeGen/RISCV/xtheadmemidx.ll
+++ b/llvm/test/CodeGen/RISCV/xtheadmemidx.ll
@@ -4,7 +4,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+d -mattr=+xtheadmemidx -mattr=+m -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV64XTHEADMEMIDX
 
-define i8* @lbia(i8* %base, i8* %addr.2, i8 %a) {
+define ptr @lbia(ptr %base, ptr %addr.2, i8 %a) {
 ; RV32XTHEADMEMIDX-LABEL: lbia:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    th.lbia a3, (a0), -1, 0
@@ -18,15 +18,15 @@ define i8* @lbia(i8* %base, i8* %addr.2, i8 %a) {
 ; RV64XTHEADMEMIDX-NEXT:    add a2, a3, a2
 ; RV64XTHEADMEMIDX-NEXT:    sb a2, 0(a1)
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %addr = getelementptr i8, i8* %base, i8 0
-  %ld = load i8, i8* %addr
-  %addr.1 = getelementptr i8, i8* %base, i8 -1
+  %addr = getelementptr i8, ptr %base, i8 0
+  %ld = load i8, ptr %addr
+  %addr.1 = getelementptr i8, ptr %base, i8 -1
   %res = add i8 %ld, %a
-  store i8 %res, i8* %addr.2
-  ret i8* %addr.1
+  store i8 %res, ptr %addr.2
+  ret ptr %addr.1
 }
 
-define i8* @lbib(i8* %base, i8 %a) {
+define ptr @lbib(ptr %base, i8 %a) {
 ; RV32XTHEADMEMIDX-LABEL: lbib:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    th.lbib a2, (a0), 1, 0
@@ -40,15 +40,15 @@ define i8* @lbib(i8* %base, i8 %a) {
 ; RV64XTHEADMEMIDX-NEXT:    add a1, a2, a1
 ; RV64XTHEADMEMIDX-NEXT:    sb a1, 1(a0)
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %addr = getelementptr i8, i8* %base, i8 1
-  %ld = load i8, i8* %addr
-  %addr.1 = getelementptr i8, i8* %base, i8 2
+  %addr = getelementptr i8, ptr %base, i8 1
+  %ld = load i8, ptr %addr
+  %addr.1 = getelementptr i8, ptr %base, i8 2
   %res = add i8 %ld, %a
-  store i8 %res, i8* %addr.1
-  ret i8* %addr
+  store i8 %res, ptr %addr.1
+  ret ptr %addr
 }
 
-define i8* @lbuia(i8* %base, i64* %addr.2, i64 %a) {
+define ptr @lbuia(ptr %base, ptr %addr.2, i64 %a) {
 ; RV32XTHEADMEMIDX-LABEL: lbuia:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    th.lbuia a4, (a0), -1, 0
@@ -65,16 +65,16 @@ define i8* @lbuia(i8* %base, i64* %addr.2, i64 %a) {
 ; RV64XTHEADMEMIDX-NEXT:    add a2, a3, a2
 ; RV64XTHEADMEMIDX-NEXT:    sd a2, 0(a1)
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %addr = getelementptr i8, i8* %base, i8 0
-  %ld = load i8, i8* %addr
+  %addr = getelementptr i8, ptr %base, i8 0
+  %ld = load i8, ptr %addr
   %zext = zext i8 %ld to i64
-  %addr.1 = getelementptr i8, i8* %base, i8 -1
+  %addr.1 = getelementptr i8, ptr %base, i8 -1
   %res = add i64 %zext, %a
-  store i64 %res, i64* %addr.2
-  ret i8* %addr.1
+  store i64 %res, ptr %addr.2
+  ret ptr %addr.1
 }
 
-define i8* @lbuib(i8* %base, i64 %a, i64* %addr.1) {
+define ptr @lbuib(ptr %base, i64 %a, ptr %addr.1) {
 ; RV32XTHEADMEMIDX-LABEL: lbuib:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    th.lbuib a4, (a0), 1, 0
@@ -91,15 +91,15 @@ define i8* @lbuib(i8* %base, i64 %a, i64* %addr.1) {
 ; RV64XTHEADMEMIDX-NEXT:    add a1, a3, a1
 ; RV64XTHEADMEMIDX-NEXT:    sd a1, 0(a2)
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %addr = getelementptr i8, i8* %base, i8 1
-  %ld = load i8, i8* %addr
+  %addr = getelementptr i8, ptr %base, i8 1
+  %ld = load i8, ptr %addr
   %zext = zext i8 %ld to i64
   %res = add i64 %zext, %a
-  store i64 %res, i64* %addr.1
-  ret i8* %addr
+  store i64 %res, ptr %addr.1
+  ret ptr %addr
 }
 
-define i16* @lhia(i16* %base, i16* %addr.2, i16 %a) {
+define ptr @lhia(ptr %base, ptr %addr.2, i16 %a) {
 ; RV32XTHEADMEMIDX-LABEL: lhia:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    th.lhia a3, (a0), -16, 1
@@ -113,15 +113,15 @@ define i16* @lhia(i16* %base, i16* %addr.2, i16 %a) {
 ; RV64XTHEADMEMIDX-NEXT:    add a2, a3, a2
 ; RV64XTHEADMEMIDX-NEXT:    sh a2, 0(a1)
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %addr = getelementptr i16, i16* %base, i16 0
-  %ld = load i16, i16* %addr
-  %addr.1 = getelementptr i16, i16* %base, i16 -16
+  %addr = getelementptr i16, ptr %base, i16 0
+  %ld = load i16, ptr %addr
+  %addr.1 = getelementptr i16, ptr %base, i16 -16
   %res = add i16 %ld, %a
-  store i16 %res, i16* %addr.2
-  ret i16* %addr.1
+  store i16 %res, ptr %addr.2
+  ret ptr %addr.1
 }
 
-define i16* @lhib(i16* %base, i16 %a) {
+define ptr @lhib(ptr %base, i16 %a) {
 ; RV32XTHEADMEMIDX-LABEL: lhib:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    th.lhib a2, (a0), 2, 0
@@ -135,15 +135,15 @@ define i16* @lhib(i16* %base, i16 %a) {
 ; RV64XTHEADMEMIDX-NEXT:    add a1, a2, a1
 ; RV64XTHEADMEMIDX-NEXT:    sh a1, 2(a0)
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %addr = getelementptr i16, i16* %base, i16 1
-  %ld = load i16, i16* %addr
-  %addr.1 = getelementptr i16, i16* %base, i16 2
+  %addr = getelementptr i16, ptr %base, i16 1
+  %ld = load i16, ptr %addr
+  %addr.1 = getelementptr i16, ptr %base, i16 2
   %res = add i16 %ld, %a
-  store i16 %res, i16* %addr.1
-  ret i16* %addr
+  store i16 %res, ptr %addr.1
+  ret ptr %addr
 }
 
-define i16* @lhuia(i16* %base, i64* %addr.2, i64 %a) {
+define ptr @lhuia(ptr %base, ptr %addr.2, i64 %a) {
 ; RV32XTHEADMEMIDX-LABEL: lhuia:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    th.lhuia a4, (a0), -16, 1
@@ -160,16 +160,16 @@ define i16* @lhuia(i16* %base, i64* %addr.2, i64 %a) {
 ; RV64XTHEADMEMIDX-NEXT:    add a2, a3, a2
 ; RV64XTHEADMEMIDX-NEXT:    sd a2, 0(a1)
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %addr = getelementptr i16, i16* %base, i16 0
-  %ld = load i16, i16* %addr
+  %addr = getelementptr i16, ptr %base, i16 0
+  %ld = load i16, ptr %addr
   %zext = zext i16 %ld to i64
-  %addr.1 = getelementptr i16, i16* %base, i16 -16
+  %addr.1 = getelementptr i16, ptr %base, i16 -16
   %res = add i64 %zext, %a
-  store i64 %res, i64* %addr.2
-  ret i16* %addr.1
+  store i64 %res, ptr %addr.2
+  ret ptr %addr.1
 }
 
-define i16* @lhuib(i16* %base, i64 %a, i64* %addr.1) {
+define ptr @lhuib(ptr %base, i64 %a, ptr %addr.1) {
 ; RV32XTHEADMEMIDX-LABEL: lhuib:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    th.lhuib a4, (a0), 2, 0
@@ -186,15 +186,15 @@ define i16* @lhuib(i16* %base, i64 %a, i64* %addr.1) {
 ; RV64XTHEADMEMIDX-NEXT:    add a1, a3, a1
 ; RV64XTHEADMEMIDX-NEXT:    sd a1, 0(a2)
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %addr = getelementptr i16, i16* %base, i16 1
-  %ld = load i16, i16* %addr
+  %addr = getelementptr i16, ptr %base, i16 1
+  %ld = load i16, ptr %addr
   %zext = zext i16 %ld to i64
   %res = add i64 %zext, %a
-  store i64 %res, i64* %addr.1
-  ret i16* %addr
+  store i64 %res, ptr %addr.1
+  ret ptr %addr
 }
 
-define i32* @lwia(i32* %base, i32* %addr.2, i32 %a) {
+define ptr @lwia(ptr %base, ptr %addr.2, i32 %a) {
 ; RV32XTHEADMEMIDX-LABEL: lwia:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    th.lwia a3, (a0), -16, 2
@@ -208,15 +208,15 @@ define i32* @lwia(i32* %base, i32* %addr.2, i32 %a) {
 ; RV64XTHEADMEMIDX-NEXT:    add a2, a3, a2
 ; RV64XTHEADMEMIDX-NEXT:    sw a2, 0(a1)
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %addr = getelementptr i32, i32* %base, i32 0
-  %ld = load i32, i32* %addr
-  %addr.1 = getelementptr i32, i32* %base, i32 -16
+  %addr = getelementptr i32, ptr %base, i32 0
+  %ld = load i32, ptr %addr
+  %addr.1 = getelementptr i32, ptr %base, i32 -16
   %res = add i32 %ld, %a
-  store i32 %res, i32* %addr.2
-  ret i32* %addr.1
+  store i32 %res, ptr %addr.2
+  ret ptr %addr.1
 }
 
-define i32* @lwib(i32* %base, i32 %a) {
+define ptr @lwib(ptr %base, i32 %a) {
 ; RV32XTHEADMEMIDX-LABEL: lwib:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    th.lwib a2, (a0), 4, 0
@@ -230,15 +230,15 @@ define i32* @lwib(i32* %base, i32 %a) {
 ; RV64XTHEADMEMIDX-NEXT:    add a1, a2, a1
 ; RV64XTHEADMEMIDX-NEXT:    sw a1, 4(a0)
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %addr = getelementptr i32, i32* %base, i32 1
-  %ld = load i32, i32* %addr
-  %addr.1 = getelementptr i32, i32* %base, i32 2
+  %addr = getelementptr i32, ptr %base, i32 1
+  %ld = load i32, ptr %addr
+  %addr.1 = getelementptr i32, ptr %base, i32 2
   %res = add i32 %ld, %a
-  store i32 %res, i32* %addr.1
-  ret i32* %addr
+  store i32 %res, ptr %addr.1
+  ret ptr %addr
 }
 
-define i32* @lwuia(i32* %base, i64* %addr.2, i64 %a) {
+define ptr @lwuia(ptr %base, ptr %addr.2, i64 %a) {
 ; RV32XTHEADMEMIDX-LABEL: lwuia:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    th.lwia a4, (a0), -16, 2
@@ -255,16 +255,16 @@ define i32* @lwuia(i32* %base, i64* %addr.2, i64 %a) {
 ; RV64XTHEADMEMIDX-NEXT:    add a2, a3, a2
 ; RV64XTHEADMEMIDX-NEXT:    sd a2, 0(a1)
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %addr = getelementptr i32, i32* %base, i32 0
-  %ld = load i32, i32* %addr
+  %addr = getelementptr i32, ptr %base, i32 0
+  %ld = load i32, ptr %addr
   %zext = zext i32 %ld to i64
-  %addr.1 = getelementptr i32, i32* %base, i32 -16
+  %addr.1 = getelementptr i32, ptr %base, i32 -16
   %res = add i64 %zext, %a
-  store i64 %res, i64* %addr.2
-  ret i32* %addr.1
+  store i64 %res, ptr %addr.2
+  ret ptr %addr.1
 }
 
-define i32* @lwuib(i32* %base, i64 %a, i64* %addr.1) {
+define ptr @lwuib(ptr %base, i64 %a, ptr %addr.1) {
 ; RV32XTHEADMEMIDX-LABEL: lwuib:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    th.lwib a4, (a0), 4, 0
@@ -281,15 +281,15 @@ define i32* @lwuib(i32* %base, i64 %a, i64* %addr.1) {
 ; RV64XTHEADMEMIDX-NEXT:    add a1, a3, a1
 ; RV64XTHEADMEMIDX-NEXT:    sd a1, 0(a2)
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %addr = getelementptr i32, i32* %base, i32 1
-  %ld = load i32, i32* %addr
+  %addr = getelementptr i32, ptr %base, i32 1
+  %ld = load i32, ptr %addr
   %zext = zext i32 %ld to i64
   %res = add i64 %zext, %a
-  store i64 %res, i64* %addr.1
-  ret i32* %addr
+  store i64 %res, ptr %addr.1
+  ret ptr %addr
 }
 
-define i64* @ldia(i64* %base, i64* %addr.2, i64 %a) {
+define ptr @ldia(ptr %base, ptr %addr.2, i64 %a) {
 ; RV32XTHEADMEMIDX-LABEL: ldia:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    lw a4, 4(a0)
@@ -309,15 +309,15 @@ define i64* @ldia(i64* %base, i64* %addr.2, i64 %a) {
 ; RV64XTHEADMEMIDX-NEXT:    add a2, a3, a2
 ; RV64XTHEADMEMIDX-NEXT:    sd a2, 0(a1)
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %addr = getelementptr i64, i64* %base, i64 0
-  %ld = load i64, i64* %addr
-  %addr.1 = getelementptr i64, i64* %base, i64 -16
+  %addr = getelementptr i64, ptr %base, i64 0
+  %ld = load i64, ptr %addr
+  %addr.1 = getelementptr i64, ptr %base, i64 -16
   %res = add i64 %ld, %a
-  store i64 %res, i64* %addr.2
-  ret i64* %addr.1
+  store i64 %res, ptr %addr.2
+  ret ptr %addr.1
 }
 
-define i64* @ldib(i64* %base, i64 %a) {
+define ptr @ldib(ptr %base, i64 %a) {
 ; RV32XTHEADMEMIDX-LABEL: ldib:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    th.lwib a3, (a0), 8, 0
@@ -336,15 +336,15 @@ define i64* @ldib(i64* %base, i64 %a) {
 ; RV64XTHEADMEMIDX-NEXT:    add a1, a2, a1
 ; RV64XTHEADMEMIDX-NEXT:    sd a1, 8(a0)
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %addr = getelementptr i64, i64* %base, i64 1
-  %ld = load i64, i64* %addr
-  %addr.1 = getelementptr i64, i64* %base, i64 2
+  %addr = getelementptr i64, ptr %base, i64 1
+  %ld = load i64, ptr %addr
+  %addr.1 = getelementptr i64, ptr %base, i64 2
   %res = add i64 %ld, %a
-  store i64 %res, i64* %addr.1
-  ret i64* %addr
+  store i64 %res, ptr %addr.1
+  ret ptr %addr
 }
 
-define i8* @sbia(i8* %base, i8 %a, i8 %b) {
+define ptr @sbia(ptr %base, i8 %a, i8 %b) {
 ; RV32XTHEADMEMIDX-LABEL: sbia:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    add a1, a1, a2
@@ -356,13 +356,13 @@ define i8* @sbia(i8* %base, i8 %a, i8 %b) {
 ; RV64XTHEADMEMIDX-NEXT:    add a1, a1, a2
 ; RV64XTHEADMEMIDX-NEXT:    th.sbia a1, (a0), 1, 0
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %addr.1 = getelementptr i8, i8* %base, i8 1
+  %addr.1 = getelementptr i8, ptr %base, i8 1
   %res = add i8 %a, %b
-  store i8 %res, i8* %base
-  ret i8* %addr.1
+  store i8 %res, ptr %base
+  ret ptr %addr.1
 }
 
-define i8* @sbib(i8* %base, i8 %a, i8 %b) {
+define ptr @sbib(ptr %base, i8 %a, i8 %b) {
 ; RV32XTHEADMEMIDX-LABEL: sbib:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    add a1, a1, a2
@@ -374,13 +374,13 @@ define i8* @sbib(i8* %base, i8 %a, i8 %b) {
 ; RV64XTHEADMEMIDX-NEXT:    add a1, a1, a2
 ; RV64XTHEADMEMIDX-NEXT:    th.sbib a1, (a0), 1, 0
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %addr.1 = getelementptr i8, i8* %base, i8 1
+  %addr.1 = getelementptr i8, ptr %base, i8 1
   %res = add i8 %a, %b
-  store i8 %res, i8* %addr.1
-  ret i8* %addr.1
+  store i8 %res, ptr %addr.1
+  ret ptr %addr.1
 }
 
-define i16* @shia(i16* %base, i16 %a, i16 %b) {
+define ptr @shia(ptr %base, i16 %a, i16 %b) {
 ; RV32XTHEADMEMIDX-LABEL: shia:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    add a1, a1, a2
@@ -392,13 +392,13 @@ define i16* @shia(i16* %base, i16 %a, i16 %b) {
 ; RV64XTHEADMEMIDX-NEXT:    add a1, a1, a2
 ; RV64XTHEADMEMIDX-NEXT:    th.shia a1, (a0), -9, 1
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %addr.1 = getelementptr i16, i16* %base, i16 -9
+  %addr.1 = getelementptr i16, ptr %base, i16 -9
   %res = add i16 %a, %b
-  store i16 %res, i16* %base
-  ret i16* %addr.1
+  store i16 %res, ptr %base
+  ret ptr %addr.1
 }
 
-define i16* @shib(i16* %base, i16 %a, i16 %b) {
+define ptr @shib(ptr %base, i16 %a, i16 %b) {
 ; RV32XTHEADMEMIDX-LABEL: shib:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    add a1, a1, a2
@@ -410,13 +410,13 @@ define i16* @shib(i16* %base, i16 %a, i16 %b) {
 ; RV64XTHEADMEMIDX-NEXT:    add a1, a1, a2
 ; RV64XTHEADMEMIDX-NEXT:    th.shib a1, (a0), 2, 0
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %addr.1 = getelementptr i16, i16* %base, i16 1
+  %addr.1 = getelementptr i16, ptr %base, i16 1
   %res = add i16 %a, %b
-  store i16 %res, i16* %addr.1
-  ret i16* %addr.1
+  store i16 %res, ptr %addr.1
+  ret ptr %addr.1
 }
 
-define i32* @swia(i32* %base, i32 %a, i32 %b) {
+define ptr @swia(ptr %base, i32 %a, i32 %b) {
 ; RV32XTHEADMEMIDX-LABEL: swia:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    add a1, a1, a2
@@ -428,13 +428,13 @@ define i32* @swia(i32* %base, i32 %a, i32 %b) {
 ; RV64XTHEADMEMIDX-NEXT:    add a1, a1, a2
 ; RV64XTHEADMEMIDX-NEXT:    th.swia a1, (a0), 8, 2
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %addr.1 = getelementptr i32, i32* %base, i32 8
+  %addr.1 = getelementptr i32, ptr %base, i32 8
   %res = add i32 %a, %b
-  store i32 %res, i32* %base
-  ret i32* %addr.1
+  store i32 %res, ptr %base
+  ret ptr %addr.1
 }
 
-define i32* @swib(i32* %base, i32 %a, i32 %b) {
+define ptr @swib(ptr %base, i32 %a, i32 %b) {
 ; RV32XTHEADMEMIDX-LABEL: swib:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    add a1, a1, a2
@@ -446,13 +446,13 @@ define i32* @swib(i32* %base, i32 %a, i32 %b) {
 ; RV64XTHEADMEMIDX-NEXT:    add a1, a1, a2
 ; RV64XTHEADMEMIDX-NEXT:    th.swib a1, (a0), -13, 3
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %addr.1 = getelementptr i32, i32* %base, i32 -26
+  %addr.1 = getelementptr i32, ptr %base, i32 -26
   %res = add i32 %a, %b
-  store i32 %res, i32* %addr.1
-  ret i32* %addr.1
+  store i32 %res, ptr %addr.1
+  ret ptr %addr.1
 }
 
-define i64* @sdia(i64* %base, i64 %a, i64 %b) {
+define ptr @sdia(ptr %base, i64 %a, i64 %b) {
 ; RV32XTHEADMEMIDX-LABEL: sdia:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    addi a5, a0, 64
@@ -470,13 +470,13 @@ define i64* @sdia(i64* %base, i64 %a, i64 %b) {
 ; RV64XTHEADMEMIDX-NEXT:    add a1, a1, a2
 ; RV64XTHEADMEMIDX-NEXT:    th.sdia a1, (a0), 8, 3
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %addr.1 = getelementptr i64, i64* %base, i64 8
+  %addr.1 = getelementptr i64, ptr %base, i64 8
   %res = add i64 %a, %b
-  store i64 %res, i64* %base
-  ret i64* %addr.1
+  store i64 %res, ptr %base
+  ret ptr %addr.1
 }
 
-define i64* @sdib(i64* %base, i64 %a, i64 %b) {
+define ptr @sdib(ptr %base, i64 %a, i64 %b) {
 ; RV32XTHEADMEMIDX-LABEL: sdib:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    add a2, a2, a4
@@ -492,13 +492,13 @@ define i64* @sdib(i64* %base, i64 %a, i64 %b) {
 ; RV64XTHEADMEMIDX-NEXT:    add a1, a1, a2
 ; RV64XTHEADMEMIDX-NEXT:    th.sdib a1, (a0), 8, 0
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %addr.1 = getelementptr i64, i64* %base, i64 1
+  %addr.1 = getelementptr i64, ptr %base, i64 1
   %res = add i64 %a, %b
-  store i64 %res, i64* %addr.1
-  ret i64* %addr.1
+  store i64 %res, ptr %addr.1
+  ret ptr %addr.1
 }
 
-define i8 @lrb_anyext(i8* %a, i64 %b) {
+define i8 @lrb_anyext(ptr %a, i64 %b) {
 ; RV32XTHEADMEMIDX-LABEL: lrb_anyext:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    th.lrb a0, a0, a1, 0
@@ -508,12 +508,12 @@ define i8 @lrb_anyext(i8* %a, i64 %b) {
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    th.lrb a0, a0, a1, 0
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %1 = getelementptr i8, i8* %a, i64 %b
-  %2 = load i8, i8* %1, align 1
+  %1 = getelementptr i8, ptr %a, i64 %b
+  %2 = load i8, ptr %1, align 1
   ret i8 %2
 }
 
-define i64 @lrb(i8* %a, i64 %b) {
+define i64 @lrb(ptr %a, i64 %b) {
 ; RV32XTHEADMEMIDX-LABEL: lrb:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    th.lrb a1, a0, a1, 0
@@ -529,14 +529,14 @@ define i64 @lrb(i8* %a, i64 %b) {
 ; RV64XTHEADMEMIDX-NEXT:    th.lrb a0, a0, a1, 0
 ; RV64XTHEADMEMIDX-NEXT:    add a0, a0, a0
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %1 = getelementptr i8, i8* %a, i64 %b
-  %2 = load i8, i8* %1, align 1
+  %1 = getelementptr i8, ptr %a, i64 %b
+  %2 = load i8, ptr %1, align 1
   %3 = sext i8 %2 to i64
   %4 = add i64 %3, %3
   ret i64 %4
 }
 
-define i8 @lurb_anyext(i8* %a, i32 %b) {
+define i8 @lurb_anyext(ptr %a, i32 %b) {
 ; RV32XTHEADMEMIDX-LABEL: lurb_anyext:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    th.lrb a0, a0, a1, 0
@@ -547,12 +547,12 @@ define i8 @lurb_anyext(i8* %a, i32 %b) {
 ; RV64XTHEADMEMIDX-NEXT:    th.lurb a0, a0, a1, 0
 ; RV64XTHEADMEMIDX-NEXT:    ret
   %1 = zext i32 %b to i64
-  %2 = getelementptr i8, i8* %a, i64 %1
-  %3 = load i8, i8* %2, align 1
+  %2 = getelementptr i8, ptr %a, i64 %1
+  %3 = load i8, ptr %2, align 1
   ret i8 %3
 }
 
-define i64 @lurb(i8* %a, i32 %b) {
+define i64 @lurb(ptr %a, i32 %b) {
 ; RV32XTHEADMEMIDX-LABEL: lurb:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    th.lrb a1, a0, a1, 0
@@ -569,14 +569,14 @@ define i64 @lurb(i8* %a, i32 %b) {
 ; RV64XTHEADMEMIDX-NEXT:    add a0, a0, a0
 ; RV64XTHEADMEMIDX-NEXT:    ret
   %1 = zext i32 %b to i64
-  %2 = getelementptr i8, i8* %a, i64 %1
-  %3 = load i8, i8* %2, align 1
+  %2 = getelementptr i8, ptr %a, i64 %1
+  %3 = load i8, ptr %2, align 1
   %4 = sext i8 %3 to i64
   %5 = add i64 %4, %4
   ret i64 %5
 }
 
-define i64 @lrbu(i8* %a, i64 %b) {
+define i64 @lrbu(ptr %a, i64 %b) {
 ; RV32XTHEADMEMIDX-LABEL: lrbu:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    th.lrbu a1, a0, a1, 0
@@ -589,14 +589,14 @@ define i64 @lrbu(i8* %a, i64 %b) {
 ; RV64XTHEADMEMIDX-NEXT:    th.lrbu a0, a0, a1, 0
 ; RV64XTHEADMEMIDX-NEXT:    add a0, a0, a0
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %1 = getelementptr i8, i8* %a, i64 %b
-  %2 = load i8, i8* %1, align 1
+  %1 = getelementptr i8, ptr %a, i64 %b
+  %2 = load i8, ptr %1, align 1
   %3 = zext i8 %2 to i64
   %4 = add i64 %3, %3
   ret i64 %4
 }
 
-define i64 @lurbu(i8* %a, i32 %b) {
+define i64 @lurbu(ptr %a, i32 %b) {
 ; RV32XTHEADMEMIDX-LABEL: lurbu:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    th.lrbu a1, a0, a1, 0
@@ -610,14 +610,14 @@ define i64 @lurbu(i8* %a, i32 %b) {
 ; RV64XTHEADMEMIDX-NEXT:    add a0, a0, a0
 ; RV64XTHEADMEMIDX-NEXT:    ret
   %1 = zext i32 %b to i64
-  %2 = getelementptr i8, i8* %a, i64 %1
-  %3 = load i8, i8* %2, align 1
+  %2 = getelementptr i8, ptr %a, i64 %1
+  %3 = load i8, ptr %2, align 1
   %4 = zext i8 %3 to i64
   %5 = add i64 %4, %4
   ret i64 %5
 }
 
-define i16 @lrh_anyext(i16* %a, i64 %b) {
+define i16 @lrh_anyext(ptr %a, i64 %b) {
 ; RV32XTHEADMEMIDX-LABEL: lrh_anyext:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    th.lrh a0, a0, a1, 1
@@ -627,12 +627,12 @@ define i16 @lrh_anyext(i16* %a, i64 %b) {
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    th.lrh a0, a0, a1, 1
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %1 = getelementptr i16, i16* %a, i64 %b
-  %2 = load i16, i16* %1, align 2
+  %1 = getelementptr i16, ptr %a, i64 %b
+  %2 = load i16, ptr %1, align 2
   ret i16 %2
 }
 
-define i64 @lrh(i16* %a, i64 %b) {
+define i64 @lrh(ptr %a, i64 %b) {
 ; RV32XTHEADMEMIDX-LABEL: lrh:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    th.lrh a1, a0, a1, 1
@@ -648,14 +648,14 @@ define i64 @lrh(i16* %a, i64 %b) {
 ; RV64XTHEADMEMIDX-NEXT:    th.lrh a0, a0, a1, 1
 ; RV64XTHEADMEMIDX-NEXT:    add a0, a0, a0
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %1 = getelementptr i16, i16* %a, i64 %b
-  %2 = load i16, i16* %1, align 2
+  %1 = getelementptr i16, ptr %a, i64 %b
+  %2 = load i16, ptr %1, align 2
   %3 = sext i16 %2 to i64
   %4 = add i64 %3, %3
   ret i64 %4
 }
 
-define i16 @lurh_anyext(i16* %a, i32 %b) {
+define i16 @lurh_anyext(ptr %a, i32 %b) {
 ; RV32XTHEADMEMIDX-LABEL: lurh_anyext:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    th.lrh a0, a0, a1, 1
@@ -666,12 +666,12 @@ define i16 @lurh_anyext(i16* %a, i32 %b) {
 ; RV64XTHEADMEMIDX-NEXT:    th.lurh a0, a0, a1, 1
 ; RV64XTHEADMEMIDX-NEXT:    ret
   %1 = zext i32 %b to i64
-  %2 = getelementptr i16, i16* %a, i64 %1
-  %3 = load i16, i16* %2, align 2
+  %2 = getelementptr i16, ptr %a, i64 %1
+  %3 = load i16, ptr %2, align 2
   ret i16 %3
 }
 
-define i64 @lurh(i16* %a, i32 %b) {
+define i64 @lurh(ptr %a, i32 %b) {
 ; RV32XTHEADMEMIDX-LABEL: lurh:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    th.lrh a1, a0, a1, 1
@@ -688,14 +688,14 @@ define i64 @lurh(i16* %a, i32 %b) {
 ; RV64XTHEADMEMIDX-NEXT:    add a0, a0, a0
 ; RV64XTHEADMEMIDX-NEXT:    ret
   %1 = zext i32 %b to i64
-  %2 = getelementptr i16, i16* %a, i64 %1
-  %3 = load i16, i16* %2, align 2
+  %2 = getelementptr i16, ptr %a, i64 %1
+  %3 = load i16, ptr %2, align 2
   %4 = sext i16 %3 to i64
   %5 = add i64 %4, %4
   ret i64 %5
 }
 
-define i64 @lrhu(i16* %a, i64 %b) {
+define i64 @lrhu(ptr %a, i64 %b) {
 ; RV32XTHEADMEMIDX-LABEL: lrhu:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    th.lrhu a1, a0, a1, 1
@@ -708,14 +708,14 @@ define i64 @lrhu(i16* %a, i64 %b) {
 ; RV64XTHEADMEMIDX-NEXT:    th.lrhu a0, a0, a1, 1
 ; RV64XTHEADMEMIDX-NEXT:    add a0, a0, a0
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %1 = getelementptr i16, i16* %a, i64 %b
-  %2 = load i16, i16* %1, align 2
+  %1 = getelementptr i16, ptr %a, i64 %b
+  %2 = load i16, ptr %1, align 2
   %3 = zext i16 %2 to i64
   %4 = add i64 %3, %3
   ret i64 %4
 }
 
-define i64 @lurhu(i16* %a, i32 %b) {
+define i64 @lurhu(ptr %a, i32 %b) {
 ; RV32XTHEADMEMIDX-LABEL: lurhu:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    th.lrhu a1, a0, a1, 1
@@ -729,14 +729,14 @@ define i64 @lurhu(i16* %a, i32 %b) {
 ; RV64XTHEADMEMIDX-NEXT:    add a0, a0, a0
 ; RV64XTHEADMEMIDX-NEXT:    ret
   %1 = zext i32 %b to i64
-  %2 = getelementptr i16, i16* %a, i64 %1
-  %3 = load i16, i16* %2, align 2
+  %2 = getelementptr i16, ptr %a, i64 %1
+  %3 = load i16, ptr %2, align 2
   %4 = zext i16 %3 to i64
   %5 = add i64 %4, %4
   ret i64 %5
 }
 
-define i32 @lrw_anyext(i32* %a, i64 %b) {
+define i32 @lrw_anyext(ptr %a, i64 %b) {
 ; RV32XTHEADMEMIDX-LABEL: lrw_anyext:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    th.lrw a0, a0, a1, 2
@@ -746,12 +746,12 @@ define i32 @lrw_anyext(i32* %a, i64 %b) {
 ; RV64XTHEADMEMIDX:       # %bb.0:
 ; RV64XTHEADMEMIDX-NEXT:    th.lrw a0, a0, a1, 2
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %1 = getelementptr i32, i32* %a, i64 %b
-  %2 = load i32, i32* %1, align 4
+  %1 = getelementptr i32, ptr %a, i64 %b
+  %2 = load i32, ptr %1, align 4
   ret i32 %2
 }
 
-define i64 @lrw(i32* %a, i64 %b) {
+define i64 @lrw(ptr %a, i64 %b) {
 ; RV32XTHEADMEMIDX-LABEL: lrw:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    th.lrw a1, a0, a1, 2
@@ -767,14 +767,14 @@ define i64 @lrw(i32* %a, i64 %b) {
 ; RV64XTHEADMEMIDX-NEXT:    th.lrw a0, a0, a1, 2
 ; RV64XTHEADMEMIDX-NEXT:    add a0, a0, a0
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %1 = getelementptr i32, i32* %a, i64 %b
-  %2 = load i32, i32* %1, align 4
+  %1 = getelementptr i32, ptr %a, i64 %b
+  %2 = load i32, ptr %1, align 4
   %3 = sext i32 %2 to i64
   %4 = add i64 %3, %3
   ret i64 %4
 }
 
-define i32 @lurw_anyext(i32* %a, i32 %b) {
+define i32 @lurw_anyext(ptr %a, i32 %b) {
 ; RV32XTHEADMEMIDX-LABEL: lurw_anyext:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    th.lrw a0, a0, a1, 2
@@ -785,12 +785,12 @@ define i32 @lurw_anyext(i32* %a, i32 %b) {
 ; RV64XTHEADMEMIDX-NEXT:    th.lurw a0, a0, a1, 2
 ; RV64XTHEADMEMIDX-NEXT:    ret
   %1 = zext i32 %b to i64
-  %2 = getelementptr i32, i32* %a, i64 %1
-  %3 = load i32, i32* %2, align 4
+  %2 = getelementptr i32, ptr %a, i64 %1
+  %3 = load i32, ptr %2, align 4
   ret i32 %3
 }
 
-define i64 @lurw(i32* %a, i32 %b) {
+define i64 @lurw(ptr %a, i32 %b) {
 ; RV32XTHEADMEMIDX-LABEL: lurw:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    th.lrw a1, a0, a1, 2
@@ -807,14 +807,14 @@ define i64 @lurw(i32* %a, i32 %b) {
 ; RV64XTHEADMEMIDX-NEXT:    add a0, a0, a0
 ; RV64XTHEADMEMIDX-NEXT:    ret
   %1 = zext i32 %b to i64
-  %2 = getelementptr i32, i32* %a, i64 %1
-  %3 = load i32, i32* %2, align 4
+  %2 = getelementptr i32, ptr %a, i64 %1
+  %3 = load i32, ptr %2, align 4
   %4 = sext i32 %3 to i64
   %5 = add i64 %4, %4
   ret i64 %5
 }
 
-define i64 @lrwu(i32* %a, i64 %b) {
+define i64 @lrwu(ptr %a, i64 %b) {
 ; RV32XTHEADMEMIDX-LABEL: lrwu:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    th.lrw a1, a0, a1, 2
@@ -827,14 +827,14 @@ define i64 @lrwu(i32* %a, i64 %b) {
 ; RV64XTHEADMEMIDX-NEXT:    th.lrwu a0, a0, a1, 2
 ; RV64XTHEADMEMIDX-NEXT:    add a0, a0, a0
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %1 = getelementptr i32, i32* %a, i64 %b
-  %2 = load i32, i32* %1, align 4
+  %1 = getelementptr i32, ptr %a, i64 %b
+  %2 = load i32, ptr %1, align 4
   %3 = zext i32 %2 to i64
   %4 = add i64 %3, %3
   ret i64 %4
 }
 
-define i64 @lurwu(i32* %a, i32 %b) {
+define i64 @lurwu(ptr %a, i32 %b) {
 ; RV32XTHEADMEMIDX-LABEL: lurwu:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    th.lrw a1, a0, a1, 2
@@ -848,14 +848,14 @@ define i64 @lurwu(i32* %a, i32 %b) {
 ; RV64XTHEADMEMIDX-NEXT:    add a0, a0, a0
 ; RV64XTHEADMEMIDX-NEXT:    ret
   %1 = zext i32 %b to i64
-  %2 = getelementptr i32, i32* %a, i64 %1
-  %3 = load i32, i32* %2, align 4
+  %2 = getelementptr i32, ptr %a, i64 %1
+  %3 = load i32, ptr %2, align 4
   %4 = zext i32 %3 to i64
   %5 = add i64 %4, %4
   ret i64 %5
 }
 
-define i64 @lrd(i64* %a, i64 %b) {
+define i64 @lrd(ptr %a, i64 %b) {
 ; RV32XTHEADMEMIDX-LABEL: lrd:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    slli a2, a1, 3
@@ -873,13 +873,13 @@ define i64 @lrd(i64* %a, i64 %b) {
 ; RV64XTHEADMEMIDX-NEXT:    th.lrd a0, a0, a1, 3
 ; RV64XTHEADMEMIDX-NEXT:    add a0, a0, a0
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %1 = getelementptr i64, i64* %a, i64 %b
-  %2 = load i64, i64* %1, align 8
+  %1 = getelementptr i64, ptr %a, i64 %b
+  %2 = load i64, ptr %1, align 8
   %3 = add i64 %2, %2
   ret i64 %3
 }
 
-define i64 @lrd_2(i64* %a, i64 %b) {
+define i64 @lrd_2(ptr %a, i64 %b) {
 ; RV32XTHEADMEMIDX-LABEL: lrd_2:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    addi a2, a0, 96
@@ -899,13 +899,13 @@ define i64 @lrd_2(i64* %a, i64 %b) {
 ; RV64XTHEADMEMIDX-NEXT:    add a0, a0, a0
 ; RV64XTHEADMEMIDX-NEXT:    ret
   %1 = add i64 %b, 12
-  %2 = getelementptr i64, i64* %a, i64 %1
-  %3 = load i64, i64* %2, align 8
+  %2 = getelementptr i64, ptr %a, i64 %1
+  %3 = load i64, ptr %2, align 8
   %4 = add i64 %3, %3
   ret i64 %4
 }
 
-define i64 @lurd(i64* %a, i32 %b) {
+define i64 @lurd(ptr %a, i32 %b) {
 ; RV32XTHEADMEMIDX-LABEL: lurd:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    slli a2, a1, 3
@@ -924,13 +924,13 @@ define i64 @lurd(i64* %a, i32 %b) {
 ; RV64XTHEADMEMIDX-NEXT:    add a0, a0, a0
 ; RV64XTHEADMEMIDX-NEXT:    ret
   %1 = zext i32 %b to i64
-  %2 = getelementptr i64, i64* %a, i64 %1
-  %3 = load i64, i64* %2, align 8
+  %2 = getelementptr i64, ptr %a, i64 %1
+  %3 = load i64, ptr %2, align 8
   %4 = add i64 %3, %3
   ret i64 %4
 }
 
-define void @srb(i8* %a, i64 %b, i8 %c) {
+define void @srb(ptr %a, i64 %b, i8 %c) {
 ; RV32XTHEADMEMIDX-LABEL: srb:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    add a3, a3, a3
@@ -943,12 +943,12 @@ define void @srb(i8* %a, i64 %b, i8 %c) {
 ; RV64XTHEADMEMIDX-NEXT:    th.srb a2, a0, a1, 0
 ; RV64XTHEADMEMIDX-NEXT:    ret
   %1 = add i8 %c, %c
-  %2 = getelementptr i8, i8* %a, i64 %b
-  store i8 %1, i8* %2, align 1
+  %2 = getelementptr i8, ptr %a, i64 %b
+  store i8 %1, ptr %2, align 1
   ret void
 }
 
-define void @surb(i8* %a, i32 %b, i8 %c) {
+define void @surb(ptr %a, i32 %b, i8 %c) {
 ; RV32XTHEADMEMIDX-LABEL: surb:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    add a2, a2, a2
@@ -962,12 +962,12 @@ define void @surb(i8* %a, i32 %b, i8 %c) {
 ; RV64XTHEADMEMIDX-NEXT:    ret
   %1 = zext i32 %b to i64
   %2 = add i8 %c, %c
-  %3 = getelementptr i8, i8* %a, i64 %1
-  store i8 %2, i8* %3, align 1
+  %3 = getelementptr i8, ptr %a, i64 %1
+  store i8 %2, ptr %3, align 1
   ret void
 }
 
-define void @srh(i16* %a, i64 %b, i16 %c) {
+define void @srh(ptr %a, i64 %b, i16 %c) {
 ; RV32XTHEADMEMIDX-LABEL: srh:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    add a3, a3, a3
@@ -980,12 +980,12 @@ define void @srh(i16* %a, i64 %b, i16 %c) {
 ; RV64XTHEADMEMIDX-NEXT:    th.srh a2, a0, a1, 1
 ; RV64XTHEADMEMIDX-NEXT:    ret
   %1 = add i16 %c, %c
-  %2 = getelementptr i16, i16* %a, i64 %b
-  store i16 %1, i16* %2, align 2
+  %2 = getelementptr i16, ptr %a, i64 %b
+  store i16 %1, ptr %2, align 2
   ret void
 }
 
-define void @surh(i16* %a, i32 %b, i16 %c) {
+define void @surh(ptr %a, i32 %b, i16 %c) {
 ; RV32XTHEADMEMIDX-LABEL: surh:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    add a2, a2, a2
@@ -999,12 +999,12 @@ define void @surh(i16* %a, i32 %b, i16 %c) {
 ; RV64XTHEADMEMIDX-NEXT:    ret
   %1 = zext i32 %b to i64
   %2 = add i16 %c, %c
-  %3 = getelementptr i16, i16* %a, i64 %1
-  store i16 %2, i16* %3, align 2
+  %3 = getelementptr i16, ptr %a, i64 %1
+  store i16 %2, ptr %3, align 2
   ret void
 }
 
-define void @srw(i32* %a, i64 %b, i32 %c) {
+define void @srw(ptr %a, i64 %b, i32 %c) {
 ; RV32XTHEADMEMIDX-LABEL: srw:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    add a3, a3, a3
@@ -1017,12 +1017,12 @@ define void @srw(i32* %a, i64 %b, i32 %c) {
 ; RV64XTHEADMEMIDX-NEXT:    th.srw a2, a0, a1, 2
 ; RV64XTHEADMEMIDX-NEXT:    ret
   %1 = add i32 %c, %c
-  %2 = getelementptr i32, i32* %a, i64 %b
-  store i32 %1, i32* %2, align 4
+  %2 = getelementptr i32, ptr %a, i64 %b
+  store i32 %1, ptr %2, align 4
   ret void
 }
 
-define void @surw(i32* %a, i32 %b, i32 %c) {
+define void @surw(ptr %a, i32 %b, i32 %c) {
 ; RV32XTHEADMEMIDX-LABEL: surw:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    add a2, a2, a2
@@ -1036,12 +1036,12 @@ define void @surw(i32* %a, i32 %b, i32 %c) {
 ; RV64XTHEADMEMIDX-NEXT:    ret
   %1 = zext i32 %b to i64
   %2 = add i32 %c, %c
-  %3 = getelementptr i32, i32* %a, i64 %1
-  store i32 %2, i32* %3, align 4
+  %3 = getelementptr i32, ptr %a, i64 %1
+  store i32 %2, ptr %3, align 4
   ret void
 }
 
-define void @srd(i64* %a, i64 %b, i64 %c) {
+define void @srd(ptr %a, i64 %b, i64 %c) {
 ; RV32XTHEADMEMIDX-LABEL: srd:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    add a2, a3, a3
@@ -1060,12 +1060,12 @@ define void @srd(i64* %a, i64 %b, i64 %c) {
 ; RV64XTHEADMEMIDX-NEXT:    th.srd a2, a0, a1, 3
 ; RV64XTHEADMEMIDX-NEXT:    ret
   %1 = add i64 %c, %c
-  %2 = getelementptr i64, i64* %a, i64 %b
-  store i64 %1, i64* %2, align 8
+  %2 = getelementptr i64, ptr %a, i64 %b
+  store i64 %1, ptr %2, align 8
   ret void
 }
 
-define void @surd(i64* %a, i32 %b, i64 %c) {
+define void @surd(ptr %a, i32 %b, i64 %c) {
 ; RV32XTHEADMEMIDX-LABEL: surd:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    add a4, a2, a2
@@ -1085,12 +1085,12 @@ define void @surd(i64* %a, i32 %b, i64 %c) {
 ; RV64XTHEADMEMIDX-NEXT:    ret
   %1 = zext i32 %b to i64
   %2 = add i64 %c, %c
-  %3 = getelementptr i64, i64* %a, i64 %1
-  store i64 %2, i64* %3, align 8
+  %3 = getelementptr i64, ptr %a, i64 %1
+  store i64 %2, ptr %3, align 8
   ret void
 }
 
-define i32* @test_simm5(i32* %base, i32 %a, i32 %b) {
+define ptr @test_simm5(ptr %base, i32 %a, i32 %b) {
 ; RV32XTHEADMEMIDX-LABEL: test_simm5:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    add a1, a1, a2
@@ -1102,13 +1102,13 @@ define i32* @test_simm5(i32* %base, i32 %a, i32 %b) {
 ; RV64XTHEADMEMIDX-NEXT:    add a1, a1, a2
 ; RV64XTHEADMEMIDX-NEXT:    th.swia a1, (a0), -12, 2
 ; RV64XTHEADMEMIDX-NEXT:    ret
-  %addr.1 = getelementptr i32, i32* %base, i32 -12
+  %addr.1 = getelementptr i32, ptr %base, i32 -12
   %res = add i32 %a, %b
-  store i32 %res, i32* %base
-  ret i32* %addr.1
+  store i32 %res, ptr %base
+  ret ptr %addr.1
 }
 
-define i64 @lrd_large_shift(i64* %a, i64 %b) {
+define i64 @lrd_large_shift(ptr %a, i64 %b) {
 ; RV32XTHEADMEMIDX-LABEL: lrd_large_shift:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    slli a1, a1, 5
@@ -1125,12 +1125,12 @@ define i64 @lrd_large_shift(i64* %a, i64 %b) {
 ; RV64XTHEADMEMIDX-NEXT:    ret
   %1 = add i64 %b, 12
   %2 = shl i64 %1, 2
-  %3 = getelementptr i64, i64* %a, i64 %2
-  %4 = load i64, i64* %3, align 8
+  %3 = getelementptr i64, ptr %a, i64 %2
+  %4 = load i64, ptr %3, align 8
   ret i64 %4
 }
 
-define i64 @lrd_large_offset(i64* %a, i64 %b) {
+define i64 @lrd_large_offset(ptr %a, i64 %b) {
 ; RV32XTHEADMEMIDX-LABEL: lrd_large_offset:
 ; RV32XTHEADMEMIDX:       # %bb.0:
 ; RV32XTHEADMEMIDX-NEXT:    slli a1, a1, 3
@@ -1151,7 +1151,7 @@ define i64 @lrd_large_offset(i64* %a, i64 %b) {
 ; RV64XTHEADMEMIDX-NEXT:    ld a0, 1792(a0)
 ; RV64XTHEADMEMIDX-NEXT:    ret
   %1 = add i64 %b, 12000
-  %2 = getelementptr i64, i64* %a, i64 %1
-  %3 = load i64, i64* %2, align 8
+  %2 = getelementptr i64, ptr %a, i64 %1
+  %3 = load i64, ptr %2, align 8
   ret i64 %3
 }

diff --git a/llvm/test/CodeGen/RISCV/xtheadmempair.ll b/llvm/test/CodeGen/RISCV/xtheadmempair.ll
index 34900b3006915..333fd4c047242 100644
--- a/llvm/test/CodeGen/RISCV/xtheadmempair.ll
+++ b/llvm/test/CodeGen/RISCV/xtheadmempair.ll
@@ -4,7 +4,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+xtheadmempair -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV64XTHEADMEMPAIR
 
-define i64 @lwd(i32* %a) {
+define i64 @lwd(ptr %a) {
 ; RV32XTHEADMEMPAIR-LABEL: lwd:
 ; RV32XTHEADMEMPAIR:       # %bb.0:
 ; RV32XTHEADMEMPAIR-NEXT:    th.lwd a1, a2, (a0), 2, 3
@@ -21,17 +21,17 @@ define i64 @lwd(i32* %a) {
 ; RV64XTHEADMEMPAIR-NEXT:    th.lwd a1, a2, (a0), 2, 3
 ; RV64XTHEADMEMPAIR-NEXT:    add a0, a1, a2
 ; RV64XTHEADMEMPAIR-NEXT:    ret
-  %1 = getelementptr i32, i32* %a, i64 4
-  %2 = load i32, i32* %1, align 4
-  %3 = getelementptr i32, i32* %a, i64 5
-  %4 = load i32, i32* %3, align 4
+  %1 = getelementptr i32, ptr %a, i64 4
+  %2 = load i32, ptr %1, align 4
+  %3 = getelementptr i32, ptr %a, i64 5
+  %4 = load i32, ptr %3, align 4
   %5 = sext i32 %2 to i64
   %6 = sext i32 %4 to i64
   %7 = add i64 %5, %6
   ret i64 %7
 }
 
-define i64 @lwud(i32* %a) {
+define i64 @lwud(ptr %a) {
 ; RV32XTHEADMEMPAIR-LABEL: lwud:
 ; RV32XTHEADMEMPAIR:       # %bb.0:
 ; RV32XTHEADMEMPAIR-NEXT:    th.lwd a1, a2, (a0), 2, 3
@@ -44,17 +44,17 @@ define i64 @lwud(i32* %a) {
 ; RV64XTHEADMEMPAIR-NEXT:    th.lwud a1, a2, (a0), 2, 3
 ; RV64XTHEADMEMPAIR-NEXT:    add a0, a1, a2
 ; RV64XTHEADMEMPAIR-NEXT:    ret
-  %1 = getelementptr i32, i32* %a, i64 4
-  %2 = load i32, i32* %1, align 4
-  %3 = getelementptr i32, i32* %a, i64 5
-  %4 = load i32, i32* %3, align 4
+  %1 = getelementptr i32, ptr %a, i64 4
+  %2 = load i32, ptr %1, align 4
+  %3 = getelementptr i32, ptr %a, i64 5
+  %4 = load i32, ptr %3, align 4
   %5 = zext i32 %2 to i64
   %6 = zext i32 %4 to i64
   %7 = add i64 %5, %6
   ret i64 %7
 }
 
-define i64 @ldd(i64* %a) {
+define i64 @ldd(ptr %a) {
 ; RV32XTHEADMEMPAIR-LABEL: ldd:
 ; RV32XTHEADMEMPAIR:       # %bb.0:
 ; RV32XTHEADMEMPAIR-NEXT:    lw a1, 32(a0)
@@ -72,15 +72,15 @@ define i64 @ldd(i64* %a) {
 ; RV64XTHEADMEMPAIR-NEXT:    th.ldd a1, a2, (a0), 2, 4
 ; RV64XTHEADMEMPAIR-NEXT:    add a0, a1, a2
 ; RV64XTHEADMEMPAIR-NEXT:    ret
-  %1 = getelementptr i64, i64* %a, i64 4
-  %2 = load i64, i64* %1, align 8
-  %3 = getelementptr i64, i64* %a, i64 5
-  %4 = load i64, i64* %3, align 8
+  %1 = getelementptr i64, ptr %a, i64 4
+  %2 = load i64, ptr %1, align 8
+  %3 = getelementptr i64, ptr %a, i64 5
+  %4 = load i64, ptr %3, align 8
   %5 = add i64 %2, %4
   ret i64 %5
 }
 
-define i64 @lwd_0(i32* %a) {
+define i64 @lwd_0(ptr %a) {
 ; RV32XTHEADMEMPAIR-LABEL: lwd_0:
 ; RV32XTHEADMEMPAIR:       # %bb.0:
 ; RV32XTHEADMEMPAIR-NEXT:    th.lwd a1, a2, (a0), 0, 3
@@ -97,17 +97,17 @@ define i64 @lwd_0(i32* %a) {
 ; RV64XTHEADMEMPAIR-NEXT:    th.lwd a1, a2, (a0), 0, 3
 ; RV64XTHEADMEMPAIR-NEXT:    add a0, a1, a2
 ; RV64XTHEADMEMPAIR-NEXT:    ret
-  %1 = getelementptr i32, i32* %a, i64 0
-  %2 = load i32, i32* %1, align 4
-  %3 = getelementptr i32, i32* %a, i64 1
-  %4 = load i32, i32* %3, align 4
+  %1 = getelementptr i32, ptr %a, i64 0
+  %2 = load i32, ptr %1, align 4
+  %3 = getelementptr i32, ptr %a, i64 1
+  %4 = load i32, ptr %3, align 4
   %5 = sext i32 %2 to i64
   %6 = sext i32 %4 to i64
   %7 = add i64 %5, %6
   ret i64 %7
 }
 
-define i64 @lwud_0(i32* %a) {
+define i64 @lwud_0(ptr %a) {
 ; RV32XTHEADMEMPAIR-LABEL: lwud_0:
 ; RV32XTHEADMEMPAIR:       # %bb.0:
 ; RV32XTHEADMEMPAIR-NEXT:    th.lwd a1, a2, (a0), 0, 3
@@ -120,17 +120,17 @@ define i64 @lwud_0(i32* %a) {
 ; RV64XTHEADMEMPAIR-NEXT:    th.lwud a1, a2, (a0), 0, 3
 ; RV64XTHEADMEMPAIR-NEXT:    add a0, a1, a2
 ; RV64XTHEADMEMPAIR-NEXT:    ret
-  %1 = getelementptr i32, i32* %a, i64 0
-  %2 = load i32, i32* %1, align 4
-  %3 = getelementptr i32, i32* %a, i64 1
-  %4 = load i32, i32* %3, align 4
+  %1 = getelementptr i32, ptr %a, i64 0
+  %2 = load i32, ptr %1, align 4
+  %3 = getelementptr i32, ptr %a, i64 1
+  %4 = load i32, ptr %3, align 4
   %5 = zext i32 %2 to i64
   %6 = zext i32 %4 to i64
   %7 = add i64 %5, %6
   ret i64 %7
 }
 
-define i64 @ldd_0(i64* %a) {
+define i64 @ldd_0(ptr %a) {
 ; RV32XTHEADMEMPAIR-LABEL: ldd_0:
 ; RV32XTHEADMEMPAIR:       # %bb.0:
 ; RV32XTHEADMEMPAIR-NEXT:    th.lwd a1, a2, (a0), 0, 3
@@ -146,15 +146,15 @@ define i64 @ldd_0(i64* %a) {
 ; RV64XTHEADMEMPAIR-NEXT:    th.ldd a1, a2, (a0), 0, 4
 ; RV64XTHEADMEMPAIR-NEXT:    add a0, a1, a2
 ; RV64XTHEADMEMPAIR-NEXT:    ret
-  %1 = getelementptr i64, i64* %a, i64 0
-  %2 = load i64, i64* %1, align 8
-  %3 = getelementptr i64, i64* %a, i64 1
-  %4 = load i64, i64* %3, align 8
+  %1 = getelementptr i64, ptr %a, i64 0
+  %2 = load i64, ptr %1, align 8
+  %3 = getelementptr i64, ptr %a, i64 1
+  %4 = load i64, ptr %3, align 8
   %5 = add i64 %2, %4
   ret i64 %5
 }
 
-define void @swd(i32* %a, i32 %b, i32%c) {
+define void @swd(ptr %a, i32 %b, i32%c) {
 ; RV32XTHEADMEMPAIR-LABEL: swd:
 ; RV32XTHEADMEMPAIR:       # %bb.0:
 ; RV32XTHEADMEMPAIR-NEXT:    th.swd a1, a2, (a0), 2, 3
@@ -164,14 +164,14 @@ define void @swd(i32* %a, i32 %b, i32%c) {
 ; RV64XTHEADMEMPAIR:       # %bb.0:
 ; RV64XTHEADMEMPAIR-NEXT:    th.swd a1, a2, (a0), 2, 3
 ; RV64XTHEADMEMPAIR-NEXT:    ret
-  %1 = getelementptr i32, i32* %a, i64 4
-  store i32 %b, i32* %1, align 4
-  %2 = getelementptr i32, i32* %a, i64 5
-  store i32 %c, i32* %2, align 4
+  %1 = getelementptr i32, ptr %a, i64 4
+  store i32 %b, ptr %1, align 4
+  %2 = getelementptr i32, ptr %a, i64 5
+  store i32 %c, ptr %2, align 4
   ret void
 }
 
-define void @sdd(i64* %a, i64 %b, i64%c) {
+define void @sdd(ptr %a, i64 %b, i64%c) {
 ; RV32XTHEADMEMPAIR-LABEL: sdd:
 ; RV32XTHEADMEMPAIR:       # %bb.0:
 ; RV32XTHEADMEMPAIR-NEXT:    sw a2, 36(a0)
@@ -184,14 +184,14 @@ define void @sdd(i64* %a, i64 %b, i64%c) {
 ; RV64XTHEADMEMPAIR:       # %bb.0:
 ; RV64XTHEADMEMPAIR-NEXT:    th.sdd a1, a2, (a0), 2, 4
 ; RV64XTHEADMEMPAIR-NEXT:    ret
-  %1 = getelementptr i64, i64* %a, i64 4
-  store i64 %b, i64* %1, align 8
-  %2 = getelementptr i64, i64* %a, i64 5
-  store i64 %c, i64* %2, align 8
+  %1 = getelementptr i64, ptr %a, i64 4
+  store i64 %b, ptr %1, align 8
+  %2 = getelementptr i64, ptr %a, i64 5
+  store i64 %c, ptr %2, align 8
   ret void
 }
 
-define void @swd_0(i32* %a, i32 %b, i32%c) {
+define void @swd_0(ptr %a, i32 %b, i32%c) {
 ; RV32XTHEADMEMPAIR-LABEL: swd_0:
 ; RV32XTHEADMEMPAIR:       # %bb.0:
 ; RV32XTHEADMEMPAIR-NEXT:    th.swd a1, a2, (a0), 0, 3
@@ -201,14 +201,14 @@ define void @swd_0(i32* %a, i32 %b, i32%c) {
 ; RV64XTHEADMEMPAIR:       # %bb.0:
 ; RV64XTHEADMEMPAIR-NEXT:    th.swd a1, a2, (a0), 0, 3
 ; RV64XTHEADMEMPAIR-NEXT:    ret
-  %1 = getelementptr i32, i32* %a, i64 0
-  store i32 %b, i32* %1, align 4
-  %2 = getelementptr i32, i32* %a, i64 1
-  store i32 %c, i32* %2, align 4
+  %1 = getelementptr i32, ptr %a, i64 0
+  store i32 %b, ptr %1, align 4
+  %2 = getelementptr i32, ptr %a, i64 1
+  store i32 %c, ptr %2, align 4
   ret void
 }
 
-define void @sdd_0(i64* %a, i64 %b, i64%c) {
+define void @sdd_0(ptr %a, i64 %b, i64%c) {
 ; RV32XTHEADMEMPAIR-LABEL: sdd_0:
 ; RV32XTHEADMEMPAIR:       # %bb.0:
 ; RV32XTHEADMEMPAIR-NEXT:    th.swd a1, a2, (a0), 0, 3
@@ -219,14 +219,14 @@ define void @sdd_0(i64* %a, i64 %b, i64%c) {
 ; RV64XTHEADMEMPAIR:       # %bb.0:
 ; RV64XTHEADMEMPAIR-NEXT:    th.sdd a1, a2, (a0), 0, 4
 ; RV64XTHEADMEMPAIR-NEXT:    ret
-  %1 = getelementptr i64, i64* %a, i64 0
-  store i64 %b, i64* %1, align 8
-  %2 = getelementptr i64, i64* %a, i64 1
-  store i64 %c, i64* %2, align 8
+  %1 = getelementptr i64, ptr %a, i64 0
+  store i64 %b, ptr %1, align 8
+  %2 = getelementptr i64, ptr %a, i64 1
+  store i64 %c, ptr %2, align 8
   ret void
 }
 
-define i64 @ld64(i64* %a) {
+define i64 @ld64(ptr %a) {
 ; RV32XTHEADMEMPAIR-LABEL: ld64:
 ; RV32XTHEADMEMPAIR:       # %bb.0:
 ; RV32XTHEADMEMPAIR-NEXT:    th.lwd a2, a1, (a0), 0, 3
@@ -237,12 +237,12 @@ define i64 @ld64(i64* %a) {
 ; RV64XTHEADMEMPAIR:       # %bb.0:
 ; RV64XTHEADMEMPAIR-NEXT:    ld a0, 0(a0)
 ; RV64XTHEADMEMPAIR-NEXT:    ret
-  %1 = getelementptr i64, i64* %a, i64 0
-  %2 = load i64, i64* %1, align 8
+  %1 = getelementptr i64, ptr %a, i64 0
+  %2 = load i64, ptr %1, align 8
   ret i64 %2
 }
 
-define i128 @ld128(i128* %a) {
+define i128 @ld128(ptr %a) {
 ; RV32XTHEADMEMPAIR-LABEL: ld128:
 ; RV32XTHEADMEMPAIR:       # %bb.0:
 ; RV32XTHEADMEMPAIR-NEXT:    th.lwd a2, a3, (a1), 1, 3
@@ -256,12 +256,12 @@ define i128 @ld128(i128* %a) {
 ; RV64XTHEADMEMPAIR-NEXT:    th.ldd a2, a1, (a0), 0, 4
 ; RV64XTHEADMEMPAIR-NEXT:    mv a0, a2
 ; RV64XTHEADMEMPAIR-NEXT:    ret
-  %1 = getelementptr i128, i128* %a, i64 0
-  %2 = load i128, i128* %1, align 8
+  %1 = getelementptr i128, ptr %a, i64 0
+  %2 = load i128, ptr %1, align 8
   ret i128 %2
 }
 
-define void @sd64(i64* %a, i64 %b) {
+define void @sd64(ptr %a, i64 %b) {
 ; RV32XTHEADMEMPAIR-LABEL: sd64:
 ; RV32XTHEADMEMPAIR:       # %bb.0:
 ; RV32XTHEADMEMPAIR-NEXT:    th.swd a1, a2, (a0), 0, 3
@@ -271,12 +271,12 @@ define void @sd64(i64* %a, i64 %b) {
 ; RV64XTHEADMEMPAIR:       # %bb.0:
 ; RV64XTHEADMEMPAIR-NEXT:    sd a1, 0(a0)
 ; RV64XTHEADMEMPAIR-NEXT:    ret
-  %1 = getelementptr i64, i64* %a, i64 0
-  store i64 %b, i64* %1, align 8
+  %1 = getelementptr i64, ptr %a, i64 0
+  store i64 %b, ptr %1, align 8
   ret void
 }
 
-define void @sd128(i128* %a, i128 %b) {
+define void @sd128(ptr %a, i128 %b) {
 ; RV32XTHEADMEMPAIR-LABEL: sd128:
 ; RV32XTHEADMEMPAIR:       # %bb.0:
 ; RV32XTHEADMEMPAIR-NEXT:    th.lwd a2, a3, (a1), 1, 3
@@ -289,12 +289,12 @@ define void @sd128(i128* %a, i128 %b) {
 ; RV64XTHEADMEMPAIR:       # %bb.0:
 ; RV64XTHEADMEMPAIR-NEXT:    th.sdd a1, a2, (a0), 0, 4
 ; RV64XTHEADMEMPAIR-NEXT:    ret
-  %1 = getelementptr i128, i128* %a, i64 0
-  store i128 %b, i128* %1, align 8
+  %1 = getelementptr i128, ptr %a, i64 0
+  store i128 %b, ptr %1, align 8
   ret void
 }
 
-define i32 @lh(i16* %a) {
+define i32 @lh(ptr %a) {
 ; RV32XTHEADMEMPAIR-LABEL: lh:
 ; RV32XTHEADMEMPAIR:       # %bb.0:
 ; RV32XTHEADMEMPAIR-NEXT:    lh a1, 0(a0)
@@ -308,17 +308,17 @@ define i32 @lh(i16* %a) {
 ; RV64XTHEADMEMPAIR-NEXT:    lh a0, 2(a0)
 ; RV64XTHEADMEMPAIR-NEXT:    add a0, a1, a0
 ; RV64XTHEADMEMPAIR-NEXT:    ret
-  %1 = getelementptr i16, i16* %a, i64 0
-  %2 = load i16, i16* %1, align 4
-  %3 = getelementptr i16, i16* %a, i64 1
-  %4 = load i16, i16* %3, align 4
+  %1 = getelementptr i16, ptr %a, i64 0
+  %2 = load i16, ptr %1, align 4
+  %3 = getelementptr i16, ptr %a, i64 1
+  %4 = load i16, ptr %3, align 4
   %5 = sext i16 %2 to i32
   %6 = sext i16 %4 to i32
   %7 = add i32 %5, %6
   ret i32 %7
 }
 
-define i32 @lb(i8* %a) {
+define i32 @lb(ptr %a) {
 ; RV32XTHEADMEMPAIR-LABEL: lb:
 ; RV32XTHEADMEMPAIR:       # %bb.0:
 ; RV32XTHEADMEMPAIR-NEXT:    lb a1, 0(a0)
@@ -332,10 +332,10 @@ define i32 @lb(i8* %a) {
 ; RV64XTHEADMEMPAIR-NEXT:    lb a0, 1(a0)
 ; RV64XTHEADMEMPAIR-NEXT:    add a0, a1, a0
 ; RV64XTHEADMEMPAIR-NEXT:    ret
-  %1 = getelementptr i8, i8* %a, i64 0
-  %2 = load i8, i8* %1, align 4
-  %3 = getelementptr i8, i8* %a, i64 1
-  %4 = load i8, i8* %3, align 4
+  %1 = getelementptr i8, ptr %a, i64 0
+  %2 = load i8, ptr %1, align 4
+  %3 = getelementptr i8, ptr %a, i64 1
+  %4 = load i8, ptr %3, align 4
   %5 = sext i8 %2 to i32
   %6 = sext i8 %4 to i32
   %7 = add i32 %5, %6

diff  --git a/llvm/test/CodeGen/SPARC/2008-10-10-InlineAsmMemoryOperand.ll b/llvm/test/CodeGen/SPARC/2008-10-10-InlineAsmMemoryOperand.ll
index 706e9e1e33d14..7e70d21191a53 100644
--- a/llvm/test/CodeGen/SPARC/2008-10-10-InlineAsmMemoryOperand.ll
+++ b/llvm/test/CodeGen/SPARC/2008-10-10-InlineAsmMemoryOperand.ll
@@ -2,15 +2,15 @@
 ; PR 1557
 
 target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-f128:128:128"
-@llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [ { i32, void ()*, i8* } { i32 65535, void ()* @set_fast_math, i8* null } ]		; <[1 x { i32, void ()*, i8* }]*> [#uses=0]
+@llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [ { i32, ptr, ptr } { i32 65535, ptr @set_fast_math, ptr null } ]		; <[1 x { i32, void ()*, i8* }]*> [#uses=0]
 
 define internal void @set_fast_math() nounwind {
 entry:
 	%fsr = alloca i32		; <i32*> [#uses=4]
-	call void asm "st %fsr, $0", "=*m"(i32* elementtype(i32) %fsr) nounwind
-	%0 = load i32, i32* %fsr, align 4		; <i32> [#uses=1]
+	call void asm "st %fsr, $0", "=*m"(ptr elementtype(i32) %fsr) nounwind
+	%0 = load i32, ptr %fsr, align 4		; <i32> [#uses=1]
 	%1 = or i32 %0, 4194304		; <i32> [#uses=1]
-	store i32 %1, i32* %fsr, align 4
-	call void asm sideeffect "ld $0, %fsr", "*m"(i32* elementtype(i32) %fsr) nounwind
+	store i32 %1, ptr %fsr, align 4
+	call void asm sideeffect "ld $0, %fsr", "*m"(ptr elementtype(i32) %fsr) nounwind
 	ret void
 }

diff  --git a/llvm/test/CodeGen/SPARC/2008-10-10-InlineAsmRegOperand.ll b/llvm/test/CodeGen/SPARC/2008-10-10-InlineAsmRegOperand.ll
index 06ae4c3b8207a..04113bd1b4d01 100644
--- a/llvm/test/CodeGen/SPARC/2008-10-10-InlineAsmRegOperand.ll
+++ b/llvm/test/CodeGen/SPARC/2008-10-10-InlineAsmRegOperand.ll
@@ -7,8 +7,8 @@ module asm "\09.section\09.dtors,\22aw\22"
 
 define void @frame_dummy() nounwind {
 entry:
-	%asmtmp = tail call void (i8*)* (void (i8*)*) asm "", "=r,0"(void (i8*)* @_Jv_RegisterClasses) nounwind		; <void (i8*)*> [#uses=0]
+	%asmtmp = tail call ptr (ptr) asm "", "=r,0"(ptr @_Jv_RegisterClasses) nounwind		; <void (i8*)*> [#uses=0]
 	unreachable
 }
 
-declare void @_Jv_RegisterClasses(i8*)
+declare void @_Jv_RegisterClasses(ptr)

diff  --git a/llvm/test/CodeGen/SPARC/2009-08-28-PIC.ll b/llvm/test/CodeGen/SPARC/2009-08-28-PIC.ll
index baad2ae507d3d..0482d9ce7fd4d 100644
--- a/llvm/test/CodeGen/SPARC/2009-08-28-PIC.ll
+++ b/llvm/test/CodeGen/SPARC/2009-08-28-PIC.ll
@@ -14,7 +14,7 @@
 
 define i32 @func(i32 %a) nounwind readonly {
 entry:
-  %0 = load i32, i32* @foo, align 4                    ; <i32> [#uses=1]
+  %0 = load i32, ptr @foo, align 4                    ; <i32> [#uses=1]
   ret i32 %0
 }
 
@@ -36,7 +36,7 @@ entry:
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:
-  %ret =  load i32, i32* @foo, align 4
+  %ret =  load i32, ptr @foo, align 4
   ret i32 %ret
 
 if.end:

diff  --git a/llvm/test/CodeGen/SPARC/2011-01-11-CC.ll b/llvm/test/CodeGen/SPARC/2011-01-11-CC.ll
index 6135b8f2075c9..c7bf71bb06c5b 100644
--- a/llvm/test/CodeGen/SPARC/2011-01-11-CC.ll
+++ b/llvm/test/CodeGen/SPARC/2011-01-11-CC.ll
@@ -173,19 +173,19 @@ exit.1:
 ; SPARC64:       subxcc
 
 
-define void @test_adde_sube(i8* %a, i8* %b, i8* %sum, i8* %diff) {
+define void @test_adde_sube(ptr %a, ptr %b, ptr %sum, ptr %diff) {
 entry:
-   %0 = bitcast i8* %a to i128*
-   %1 = bitcast i8* %b to i128*
-   %2 = load i128, i128* %0
-   %3 = load i128, i128* %1
+   %0 = bitcast ptr %a to ptr
+   %1 = bitcast ptr %b to ptr
+   %2 = load i128, ptr %0
+   %3 = load i128, ptr %1
    %4 = add i128 %2, %3
-   %5 = bitcast i8* %sum to i128*
-   store i128 %4, i128* %5
-   tail call void asm sideeffect "", "=*m,*m"(i128* elementtype(i128) %0, i128* elementtype(i128) %5) nounwind
-   %6 = load i128, i128* %0
+   %5 = bitcast ptr %sum to ptr
+   store i128 %4, ptr %5
+   tail call void asm sideeffect "", "=*m,*m"(ptr elementtype(i128) %0, ptr elementtype(i128) %5) nounwind
+   %6 = load i128, ptr %0
    %7 = sub i128 %2, %6
-   %8 = bitcast i8* %diff to i128*
-   store i128 %7, i128* %8
+   %8 = bitcast ptr %diff to ptr
+   store i128 %7, ptr %8
    ret void
 }

diff  --git a/llvm/test/CodeGen/SPARC/2011-01-11-FrameAddr.ll b/llvm/test/CodeGen/SPARC/2011-01-11-FrameAddr.ll
index abd2b867ac3aa..b0249bef23454 100644
--- a/llvm/test/CodeGen/SPARC/2011-01-11-FrameAddr.ll
+++ b/llvm/test/CodeGen/SPARC/2011-01-11-FrameAddr.ll
@@ -5,7 +5,7 @@
 ;RUN: llc -march=sparcv9  < %s | FileCheck %s -check-prefix=SPARC64
 
 
-define i8* @frameaddr() nounwind readnone {
+define ptr @frameaddr() nounwind readnone {
 entry:
 ;V8-LABEL: frameaddr:
 ;V8: save %sp, -96, %sp
@@ -22,11 +22,11 @@ entry:
 ;SPARC64:       ret
 ;SPARC64:       restore %fp, 2047, %o0
 
-  %0 = tail call i8* @llvm.frameaddress(i32 0)
-  ret i8* %0
+  %0 = tail call ptr @llvm.frameaddress(i32 0)
+  ret ptr %0
 }
 
-define i8* @frameaddr2() nounwind readnone {
+define ptr @frameaddr2() nounwind readnone {
 entry:
 ;V8-LABEL: frameaddr2:
 ;V8: ta 3 ! encoding: [0x91,0xd0,0x20,0x03]
@@ -48,15 +48,15 @@ entry:
 ;SPARC64: ret
 ;SPARC64: restore %[[R2]], 2047, %o0
 
-  %0 = tail call i8* @llvm.frameaddress(i32 3)
-  ret i8* %0
+  %0 = tail call ptr @llvm.frameaddress(i32 3)
+  ret ptr %0
 }
 
-declare i8* @llvm.frameaddress(i32) nounwind readnone
+declare ptr @llvm.frameaddress(i32) nounwind readnone
 
 
 
-define i8* @retaddr() nounwind readnone {
+define ptr @retaddr() nounwind readnone {
 entry:
 ;V8-LABEL: retaddr:
 ;V8: mov %o7, {{.+}}
@@ -67,11 +67,11 @@ entry:
 ;SPARC64-LABEL: retaddr
 ;SPARC64:       mov %o7, {{.+}}
 
-  %0 = tail call i8* @llvm.returnaddress(i32 0)
-  ret i8* %0
+  %0 = tail call ptr @llvm.returnaddress(i32 0)
+  ret ptr %0
 }
 
-define i8* @retaddr2() nounwind readnone {
+define ptr @retaddr2() nounwind readnone {
 entry:
 ;V8-LABEL: retaddr2:
 ;V8: ta 3
@@ -91,11 +91,11 @@ entry:
 ;SPARC64: ldx [%[[R0]]+2159], %[[R1:[goli][0-7]]]
 ;SPARC64: ldx [%[[R1]]+2167], {{.+}}
 
-  %0 = tail call i8* @llvm.returnaddress(i32 3)
-  ret i8* %0
+  %0 = tail call ptr @llvm.returnaddress(i32 3)
+  ret ptr %0
 }
 
-define i8* @retaddr3() nounwind readnone {
+define ptr @retaddr3() nounwind readnone {
 entry:
 ;V8-LABEL: retaddr3:
 ;V8: ta 3
@@ -109,8 +109,8 @@ entry:
 ;SPARC64:       flushw
 ;SPARC64: ldx [%fp+2167],     %[[R0:[goli][0-7]]]
 
-  %0 = tail call i8* @llvm.returnaddress(i32 1)
-  ret i8* %0
+  %0 = tail call ptr @llvm.returnaddress(i32 1)
+  ret ptr %0
 }
 
-declare i8* @llvm.returnaddress(i32) nounwind readnone
+declare ptr @llvm.returnaddress(i32) nounwind readnone

diff  --git a/llvm/test/CodeGen/SPARC/2011-01-19-DelaySlot.ll b/llvm/test/CodeGen/SPARC/2011-01-19-DelaySlot.ll
index 90c35ce86e1fe..98005a6103226 100644
--- a/llvm/test/CodeGen/SPARC/2011-01-19-DelaySlot.ll
+++ b/llvm/test/CodeGen/SPARC/2011-01-19-DelaySlot.ll
@@ -14,7 +14,7 @@ entry:
   ret i32 %0
 }
 
-define i32 @test_jmpl(i32 (i32, i32)* nocapture %f, i32 %a, i32 %b) #0 {
+define i32 @test_jmpl(ptr nocapture %f, i32 %a, i32 %b) #0 {
 entry:
 ; CHECK:      test_jmpl
 ; CHECK:      call
@@ -84,7 +84,7 @@ entry:
 ;UNOPT-LABEL:       test_implicit_def:
 ;UNOPT:       call func
 ;UNOPT-NEXT:  nop
-  %0 = tail call i32 @func(i32* undef) nounwind
+  %0 = tail call i32 @func(ptr undef) nounwind
   ret i32 0
 }
 
@@ -105,7 +105,7 @@ entry:
 }
 
 
-declare i32 @func(i32*)
+declare i32 @func(ptr)
 
 
 define i32 @restore_add(i32 %a, i32 %b) {

diff  --git a/llvm/test/CodeGen/SPARC/2011-01-21-ByValArgs.ll b/llvm/test/CodeGen/SPARC/2011-01-21-ByValArgs.ll
index cd95fbcf052fc..05a1ea47bf112 100644
--- a/llvm/test/CodeGen/SPARC/2011-01-21-ByValArgs.ll
+++ b/llvm/test/CodeGen/SPARC/2011-01-21-ByValArgs.ll
@@ -11,8 +11,8 @@ entry:
 ;CHECK:     st
 ;CHECK:     st
 ;CHECK:     bar
-  %0 = tail call i32 @bar(%struct.foo_t* byval(%struct.foo_t) @s) nounwind
+  %0 = tail call i32 @bar(ptr byval(%struct.foo_t) @s) nounwind
   ret i32 %0
 }
 
-declare i32 @bar(%struct.foo_t* byval(%struct.foo_t))
+declare i32 @bar(ptr byval(%struct.foo_t))

diff  --git a/llvm/test/CodeGen/SPARC/2011-01-22-SRet.ll b/llvm/test/CodeGen/SPARC/2011-01-22-SRet.ll
index 445edc9e89e8f..60b0f8918803e 100644
--- a/llvm/test/CodeGen/SPARC/2011-01-22-SRet.ll
+++ b/llvm/test/CodeGen/SPARC/2011-01-22-SRet.ll
@@ -2,17 +2,17 @@
 
 %struct.foo_t = type { i32, i32, i32 }
 
-define weak void @make_foo(%struct.foo_t* noalias sret(%struct.foo_t) %agg.result, i32 %a, i32 %b, i32 %c) nounwind {
+define weak void @make_foo(ptr noalias sret(%struct.foo_t) %agg.result, i32 %a, i32 %b, i32 %c) nounwind {
 entry:
 ;CHECK-LABEL: make_foo:
 ;CHECK: ld [%sp+64], {{.+}}
 ;CHECK: jmp %o7+12
-  %0 = getelementptr inbounds %struct.foo_t, %struct.foo_t* %agg.result, i32 0, i32 0
-  store i32 %a, i32* %0, align 4
-  %1 = getelementptr inbounds %struct.foo_t, %struct.foo_t* %agg.result, i32 0, i32 1
-  store i32 %b, i32* %1, align 4
-  %2 = getelementptr inbounds %struct.foo_t, %struct.foo_t* %agg.result, i32 0, i32 2
-  store i32 %c, i32* %2, align 4
+  %0 = getelementptr inbounds %struct.foo_t, ptr %agg.result, i32 0, i32 0
+  store i32 %a, ptr %0, align 4
+  %1 = getelementptr inbounds %struct.foo_t, ptr %agg.result, i32 0, i32 1
+  store i32 %b, ptr %1, align 4
+  %2 = getelementptr inbounds %struct.foo_t, ptr %agg.result, i32 0, i32 2
+  store i32 %c, ptr %2, align 4
   ret void
 }
 
@@ -23,13 +23,13 @@ entry:
 ;CHECK: st {{.+}}, [%sp+64]
 ;CHECK: unimp 12
   %f = alloca %struct.foo_t, align 8
-  call void @make_foo(%struct.foo_t* noalias sret(%struct.foo_t) %f, i32 10, i32 20, i32 30) nounwind
-  %0 = getelementptr inbounds %struct.foo_t, %struct.foo_t* %f, i32 0, i32 0
-  %1 = load i32, i32* %0, align 8
-  %2 = getelementptr inbounds %struct.foo_t, %struct.foo_t* %f, i32 0, i32 1
-  %3 = load i32, i32* %2, align 4
-  %4 = getelementptr inbounds %struct.foo_t, %struct.foo_t* %f, i32 0, i32 2
-  %5 = load i32, i32* %4, align 8
+  call void @make_foo(ptr noalias sret(%struct.foo_t) %f, i32 10, i32 20, i32 30) nounwind
+  %0 = getelementptr inbounds %struct.foo_t, ptr %f, i32 0, i32 0
+  %1 = load i32, ptr %0, align 8
+  %2 = getelementptr inbounds %struct.foo_t, ptr %f, i32 0, i32 1
+  %3 = load i32, ptr %2, align 4
+  %4 = getelementptr inbounds %struct.foo_t, ptr %f, i32 0, i32 2
+  %5 = load i32, ptr %4, align 8
   %6 = add nsw i32 %3, %1
   %7 = add nsw i32 %6, %5
   ret i32 %7

diff  --git a/llvm/test/CodeGen/SPARC/2011-12-03-TailDuplication.ll b/llvm/test/CodeGen/SPARC/2011-12-03-TailDuplication.ll
index aa7de1618ebb0..bc390853143d3 100644
--- a/llvm/test/CodeGen/SPARC/2011-12-03-TailDuplication.ll
+++ b/llvm/test/CodeGen/SPARC/2011-12-03-TailDuplication.ll
@@ -11,7 +11,7 @@ if.end.0:
   br i1 undef, label %if.then.1, label %else.1
 
 else.1:
-  %0 = bitcast i8* undef to i8**
+  %0 = bitcast ptr undef to ptr
   br label %else.1.2
 
 if.then.1:

diff  --git a/llvm/test/CodeGen/SPARC/2012-05-01-LowerArguments.ll b/llvm/test/CodeGen/SPARC/2012-05-01-LowerArguments.ll
index a607f109762f2..541949c7e0343 100644
--- a/llvm/test/CodeGen/SPARC/2012-05-01-LowerArguments.ll
+++ b/llvm/test/CodeGen/SPARC/2012-05-01-LowerArguments.ll
@@ -4,10 +4,10 @@
 
 target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-f128:128:128"
 target triple = "sparc-unknown-linux-gnu"
-	%"5tango4core9Exception11IOException" = type { [5 x i8*]*, i8*, { i64, i8* }, { i64, i8* }, i64, %"6Object7Monitor"*, %"5tango4core9Exception11IOException"* }
-	%"6Object7Monitor" = type { [3 x i8*]*, i8* }
+	%"5tango4core9Exception11IOException" = type { ptr, ptr, { i64, ptr }, { i64, ptr }, i64, ptr, ptr }
+	%"6Object7Monitor" = type { ptr, ptr }
 
-define fastcc %"5tango4core9Exception11IOException"* @_D5tango4core9Exception13TextException5_ctorMFAaZC5tango4core9Exception13TextException(%"5tango4core9Exception11IOException"* %this, { i64, i8* } %msg) {
+define fastcc ptr @_D5tango4core9Exception13TextException5_ctorMFAaZC5tango4core9Exception13TextException(ptr %this, { i64, ptr } %msg) {
 entry_tango.core.Exception.TextException.this:
 	unreachable
 }

diff  --git a/llvm/test/CodeGen/SPARC/2013-05-17-CallFrame.ll b/llvm/test/CodeGen/SPARC/2013-05-17-CallFrame.ll
index 8d6e9ca0a1b57..3f6d28385e267 100644
--- a/llvm/test/CodeGen/SPARC/2013-05-17-CallFrame.ll
+++ b/llvm/test/CodeGen/SPARC/2013-05-17-CallFrame.ll
@@ -68,8 +68,8 @@ define void @variable_alloca_with_adj_call_stack(i32 %num) {
 ; SPARC64-NEXT:    restore
 entry:
   %0 = alloca i8, i32 %num, align 8
-  call void @foo(i8* %0, i8* %0, i8* %0, i8* %0, i8* %0, i8* %0, i8* %0, i8* %0, i8* %0, i8* %0)
+  call void @foo(ptr %0, ptr %0, ptr %0, ptr %0, ptr %0, ptr %0, ptr %0, ptr %0, ptr %0, ptr %0)
   ret void
 }
 
-declare void @foo(i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*);
+declare void @foo(ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr);

diff  --git a/llvm/test/CodeGen/SPARC/32abi.ll b/llvm/test/CodeGen/SPARC/32abi.ll
index b5f75b1982e02..15ba6d408be44 100644
--- a/llvm/test/CodeGen/SPARC/32abi.ll
+++ b/llvm/test/CodeGen/SPARC/32abi.ll
@@ -21,21 +21,21 @@ define void @intarg(i8  %a0,   ; %i0
                     i8  %a1,   ; %i1
                     i16 %a2,   ; %i2
                     i32 %a3,   ; %i3
-                    i8* %a4,   ; %i4
+                    ptr %a4,   ; %i4
                     i32 %a5,   ; %i5
                     i32 signext %a6,   ; [%fp+92]
-                    i8* %a7) { ; [%fp+96]
-  store volatile i8 %a0, i8* %a4
-  store volatile i8 %a1, i8* %a4
-  %p16 = bitcast i8* %a4 to i16*
-  store volatile i16 %a2, i16* %p16
-  %p32 = bitcast i8* %a4 to i32*
-  store volatile i32 %a3, i32* %p32
-  %pp = bitcast i8* %a4 to i8**
-  store volatile i8* %a4, i8** %pp
-  store volatile i32 %a5, i32* %p32
-  store volatile i32 %a6, i32* %p32
-  store volatile i8* %a7, i8** %pp
+                    ptr %a7) { ; [%fp+96]
+  store volatile i8 %a0, ptr %a4
+  store volatile i8 %a1, ptr %a4
+  %p16 = bitcast ptr %a4 to ptr
+  store volatile i16 %a2, ptr %p16
+  %p32 = bitcast ptr %a4 to ptr
+  store volatile i32 %a3, ptr %p32
+  %pp = bitcast ptr %a4 to ptr
+  store volatile ptr %a4, ptr %pp
+  store volatile i32 %a5, ptr %p32
+  store volatile i32 %a6, ptr %p32
+  store volatile ptr %a7, ptr %pp
   ret void
 }
 
@@ -47,8 +47,8 @@ define void @intarg(i8  %a0,   ; %i0
 ; CHECK: call intarg
 ; CHECK-NOT: add %sp
 ; CHECK: restore
-define void @call_intarg(i32 %i0, i8* %i1) {
-  call void @intarg(i8 0, i8 1, i16 2, i32 3, i8* undef, i32 5, i32 %i0, i8* %i1)
+define void @call_intarg(i32 %i0, ptr %i1) {
+  call void @intarg(i8 0, i8 1, i16 2, i32 3, ptr undef, i32 5, i32 %i0, ptr %i1)
   ret void
 }
 
@@ -166,10 +166,10 @@ define double @floatarg(double %a0,   ; %i0,%i1
 ; SOFT-NEXT:  call floatarg
 ; SOFT:  std %o0, [%i4]
 ; CHECK: restore
-define void @call_floatarg(float %f1, double %d2, float %f5, double *%p) {
+define void @call_floatarg(float %f1, double %d2, float %f5, ptr %p) {
   %r = call double @floatarg(double %d2, float %f1, double %d2, double %d2,
                              double %d2, float %f1)
-  store double %r, double* %p
+  store double %r, ptr %p
   ret void
 }
 
@@ -242,8 +242,8 @@ define i64 @i64arg(i64 %a0,    ; %i0,%i1
 ; CHECK: std %o0, [%i3]
 ; CHECK-NEXT: restore
 
-define void @call_i64arg(i32 %a0, i64 %a1, i64* %p) {
+define void @call_i64arg(i32 %a0, i64 %a1, ptr %p) {
   %r = call i64 @i64arg(i64 %a1, i32 %a0, i64 %a1, i64 %a1, i64 %a1, i32 %a0)
-  store i64 %r, i64* %p
+  store i64 %r, ptr %p
   ret void
 }

diff  --git a/llvm/test/CodeGen/SPARC/64abi.ll b/llvm/test/CodeGen/SPARC/64abi.ll
index 27865f718151e..8d2f152ef7564 100644
--- a/llvm/test/CodeGen/SPARC/64abi.ll
+++ b/llvm/test/CodeGen/SPARC/64abi.ll
@@ -20,21 +20,21 @@ define void @intarg(i8  %a0,   ; %i0
                     i8  %a1,   ; %i1
                     i16 %a2,   ; %i2
                     i32 %a3,   ; %i3
-                    i8* %a4,   ; %i4
+                    ptr %a4,   ; %i4
                     i32 %a5,   ; %i5
                     i32 signext %a6,   ; [%fp+BIAS+176]
-                    i8* %a7) { ; [%fp+BIAS+184]
-  store volatile i8 %a0, i8* %a4
-  store volatile i8 %a1, i8* %a4
-  %p16 = bitcast i8* %a4 to i16*
-  store volatile i16 %a2, i16* %p16
-  %p32 = bitcast i8* %a4 to i32*
-  store volatile i32 %a3, i32* %p32
-  %pp = bitcast i8* %a4 to i8**
-  store volatile i8* %a4, i8** %pp
-  store volatile i32 %a5, i32* %p32
-  store volatile i32 %a6, i32* %p32
-  store volatile i8* %a7, i8** %pp
+                    ptr %a7) { ; [%fp+BIAS+184]
+  store volatile i8 %a0, ptr %a4
+  store volatile i8 %a1, ptr %a4
+  %p16 = bitcast ptr %a4 to ptr
+  store volatile i16 %a2, ptr %p16
+  %p32 = bitcast ptr %a4 to ptr
+  store volatile i32 %a3, ptr %p32
+  %pp = bitcast ptr %a4 to ptr
+  store volatile ptr %a4, ptr %pp
+  store volatile i32 %a5, ptr %p32
+  store volatile i32 %a6, ptr %p32
+  store volatile ptr %a7, ptr %pp
   ret void
 }
 
@@ -49,8 +49,8 @@ define void @intarg(i8  %a0,   ; %i0
 ; CHECK: call intarg
 ; CHECK-NOT: add %sp
 ; CHECK: restore
-define void @call_intarg(i32 %i0, i8* %i1) {
-  call void @intarg(i8 0, i8 1, i16 2, i32 3, i8* undef, i32 5, i32 signext %i0, i8* %i1)
+define void @call_intarg(i32 %i0, ptr %i1) {
+  call void @intarg(i8 0, i8 1, i16 2, i32 3, ptr undef, i32 5, i32 signext %i0, ptr %i1)
   ret void
 }
 
@@ -128,13 +128,13 @@ define double @floatarg(float %a0,    ; %f1
 ; CHECK-NOT: add %sp
 ; CHECK: restore
 
-define void @call_floatarg(float %f1, double %d2, float %f5, double *%p) {
+define void @call_floatarg(float %f1, double %d2, float %f5, ptr %p) {
   %r = call double @floatarg(float %f5, double %d2, double %d2, double %d2,
                              float %f5, float %f5,  float %f5,  float %f5,
                              float %f5, float %f5,  float %f5,  float %f5,
                              float %f5, float %f5,  float %f5,  float %f5,
                              float %f1, double %d2)
-  store double %r, double* %p
+  store double %r, ptr %p
   ret void
 }
 
@@ -161,14 +161,14 @@ define void @mixedarg(i8 %a0,      ; %i0
                       i13 %a4,     ; %i4
                       float %a5,   ; %f11
                       i64 %a6,     ; [%fp+BIAS+176]
-                      double *%a7, ; [%fp+BIAS+184]
+                      ptr %a7, ; [%fp+BIAS+184]
                       double %a8,  ; %d16
-                      i16* %a9) {  ; [%fp+BIAS+200]
+                      ptr %a9) {  ; [%fp+BIAS+200]
   %d1 = fpext float %a1 to double
   %s3 = fadd double %a3, %d1
   %s8 = fadd double %a8, %s3
-  store double %s8, double* %a7
-  store i16 %a2, i16* %a9
+  store double %s8, ptr %a7
+  store i16 %a2, ptr %a9
   ret void
 }
 
@@ -183,7 +183,7 @@ define void @mixedarg(i8 %a0,      ; %i0
 ; CHECK-NOT: add %sp
 ; CHECK: restore
 
-define void @call_mixedarg(i64 %i0, double %f2, i16* %i2) {
+define void @call_mixedarg(i64 %i0, double %f2, ptr %i2) {
   call void @mixedarg(i8 undef,
                       float undef,
                       i16 undef,
@@ -191,9 +191,9 @@ define void @call_mixedarg(i64 %i0, double %f2, i16* %i2) {
                       i13 undef,
                       float undef,
                       i64 %i0,
-                      double* undef,
+                      ptr undef,
                       double %f2,
-                      i16* %i2)
+                      ptr %i2)
   ret void
 }
 
@@ -221,7 +221,7 @@ define i32 @inreg_fi(i32 inreg %a0,     ; high bits of %i0
 ; SOFT:  sllx %i1, 32, %i1
 ; SOFT:  or %i1, %i0, %o0
 ; CHECK: call inreg_fi
-define void @call_inreg_fi(i32* %p, i32 %i1, float %f5) {
+define void @call_inreg_fi(ptr %p, i32 %i1, float %f5) {
   %x = call i32 @inreg_fi(i32 inreg %i1, float inreg %f5)
   ret void
 }
@@ -244,7 +244,7 @@ define float @inreg_ff(float inreg %a0,   ; %f0
 ; SOFT: sllx %i1, 32, %i1
 ; SOFT: or %i1, %i0, %o0
 ; CHECK: call inreg_ff
-define void @call_inreg_ff(i32* %p, float %f3, float %f5) {
+define void @call_inreg_ff(ptr %p, float %f3, float %f5) {
   %x = call float @inreg_ff(float inreg %f3, float inreg %f5)
   ret void
 }
@@ -268,7 +268,7 @@ define i32 @inreg_if(float inreg %a0, ; %f0
 ; SOFT: sllx %i1, 32, %i1
 ; SOFT: or %i1, %i0, %o0
 ; CHECK: call inreg_if
-define void @call_inreg_if(i32* %p, float %f3, i32 %i2) {
+define void @call_inreg_if(ptr %p, float %f3, i32 %i2) {
   %x = call i32 @inreg_if(float inreg %f3, i32 inreg %i2)
   ret void
 }
@@ -288,7 +288,7 @@ define i32 @inreg_ii(i32 inreg %a0,   ; high bits of %i0
 ; CHECK: sllx %i1, 32, [[R1:%[gilo][0-7]]]
 ; CHECK: or [[R1]], [[R2]], %o0
 ; CHECK: call inreg_ii
-define void @call_inreg_ii(i32* %p, i32 %i1, i32 %i2) {
+define void @call_inreg_ii(ptr %p, i32 %i1, i32 %i2) {
   %x = call i32 @inreg_ii(i32 inreg %i1, i32 inreg %i2)
   ret void
 }
@@ -299,11 +299,11 @@ define void @call_inreg_ii(i32* %p, i32 %i1, i32 %i2) {
 ; HARD: ld [%i3], %f2
 ; SOFT: ld [%i3], %i1
 define { i32, float } @ret_i32_float_pair(i32 %a0, i32 %a1,
-                                          i32* %p, float* %q) {
-  %r1 = load i32, i32* %p
+                                          ptr %p, ptr %q) {
+  %r1 = load i32, ptr %p
   %rv1 = insertvalue { i32, float } undef, i32 %r1, 0
-  store i32 0, i32* %p
-  %r2 = load float, float* %q
+  store i32 0, ptr %p
+  %r2 = load float, ptr %q
   %rv2 = insertvalue { i32, float } %rv1, float %r2, 1
   ret { i32, float } %rv2
 }
@@ -313,13 +313,13 @@ define { i32, float } @ret_i32_float_pair(i32 %a0, i32 %a1,
 ; CHECK: st %o0, [%i0]
 ; HARD: st %f2, [%i1]
 ; SOFT: st %o1, [%i1]
-define void @call_ret_i32_float_pair(i32* %i0, float* %i1) {
+define void @call_ret_i32_float_pair(ptr %i0, ptr %i1) {
   %rv = call { i32, float } @ret_i32_float_pair(i32 undef, i32 undef,
-                                                i32* undef, float* undef)
+                                                ptr undef, ptr undef)
   %e0 = extractvalue { i32, float } %rv, 0
-  store i32 %e0, i32* %i0
+  store i32 %e0, ptr %i0
   %e1 = extractvalue { i32, float } %rv, 1
-  store float %e1, float* %i1
+  store float %e1, ptr %i1
   ret void
 }
 
@@ -330,11 +330,11 @@ define void @call_ret_i32_float_pair(i32* %i0, float* %i1) {
 ; SOFT: ld [%i3], %i1
 ; CHECK: sllx [[R]], 32, %i0
 define inreg { i32, float } @ret_i32_float_packed(i32 %a0, i32 %a1,
-                                                  i32* %p, float* %q) {
-  %r1 = load i32, i32* %p
+                                                  ptr %p, ptr %q) {
+  %r1 = load i32, ptr %p
   %rv1 = insertvalue { i32, float } undef, i32 %r1, 0
-  store i32 0, i32* %p
-  %r2 = load float, float* %q
+  store i32 0, ptr %p
+  %r2 = load float, ptr %q
   %rv2 = insertvalue { i32, float } %rv1, float %r2, 1
   ret { i32, float } %rv2
 }
@@ -345,13 +345,13 @@ define inreg { i32, float } @ret_i32_float_packed(i32 %a0, i32 %a1,
 ; CHECK: st [[R]], [%i0]
 ; HARD: st %f1, [%i1]
 ; SOFT: st %o0, [%i1]
-define void @call_ret_i32_float_packed(i32* %i0, float* %i1) {
+define void @call_ret_i32_float_packed(ptr %i0, ptr %i1) {
   %rv = call { i32, float } @ret_i32_float_packed(i32 undef, i32 undef,
-                                                  i32* undef, float* undef)
+                                                  ptr undef, ptr undef)
   %e0 = extractvalue { i32, float } %rv, 0
-  store i32 %e0, i32* %i0
+  store i32 %e0, ptr %i0
   %e1 = extractvalue { i32, float } %rv, 1
-  store float %e1, float* %i1
+  store float %e1, ptr %i1
   ret void
 }
 
@@ -363,11 +363,11 @@ define void @call_ret_i32_float_packed(i32* %i0, float* %i1) {
 ; CHECK: sllx [[R2]], 32, [[R3:%[gilo][0-7]]]
 ; CHECK: or [[R3]], [[R1]], %i0
 define inreg { i32, i32 } @ret_i32_packed(i32 %a0, i32 %a1,
-                                          i32* %p, i32* %q) {
-  %r1 = load i32, i32* %p
+                                          ptr %p, ptr %q) {
+  %r1 = load i32, ptr %p
   %rv1 = insertvalue { i32, i32 } undef, i32 %r1, 1
-  store i32 0, i32* %p
-  %r2 = load i32, i32* %q
+  store i32 0, ptr %p
+  %r2 = load i32, ptr %q
   %rv2 = insertvalue { i32, i32 } %rv1, i32 %r2, 0
   ret { i32, i32 } %rv2
 }
@@ -377,13 +377,13 @@ define inreg { i32, i32 } @ret_i32_packed(i32 %a0, i32 %a1,
 ; CHECK: srlx %o0, 32, [[R:%[gilo][0-7]]]
 ; CHECK: st [[R]], [%i0]
 ; CHECK: st %o0, [%i1]
-define void @call_ret_i32_packed(i32* %i0, i32* %i1) {
+define void @call_ret_i32_packed(ptr %i0, ptr %i1) {
   %rv = call { i32, i32 } @ret_i32_packed(i32 undef, i32 undef,
-                                          i32* undef, i32* undef)
+                                          ptr undef, ptr undef)
   %e0 = extractvalue { i32, i32 } %rv, 0
-  store i32 %e0, i32* %i0
+  store i32 %e0, ptr %i0
   %e1 = extractvalue { i32, i32 } %rv, 1
-  store i32 %e1, i32* %i1
+  store i32 %e1, ptr %i1
   ret void
 }
 
@@ -440,12 +440,12 @@ entry:
 define i32 @test_large_stack() {
 entry:
   %buffer1 = alloca [16384 x i8], align 8
-  %buffer1.sub = getelementptr inbounds [16384 x i8], [16384 x i8]* %buffer1, i32 0, i32 0
-  %0 = call i32 @use_buf(i32 16384, i8* %buffer1.sub)
+  %buffer1.sub = getelementptr inbounds [16384 x i8], ptr %buffer1, i32 0, i32 0
+  %0 = call i32 @use_buf(i32 16384, ptr %buffer1.sub)
   ret i32 %0
 }
 
-declare i32 @use_buf(i32, i8*)
+declare i32 @use_buf(i32, ptr)
 
 ; CHECK-LABEL: test_fp128_args:
 ; HARD-DAG:   std %f0, [%fp+{{.+}}]

diff  --git a/llvm/test/CodeGen/SPARC/64atomics.ll b/llvm/test/CodeGen/SPARC/64atomics.ll
index 89175b6242e63..2c00f955f497b 100644
--- a/llvm/test/CodeGen/SPARC/64atomics.ll
+++ b/llvm/test/CodeGen/SPARC/64atomics.ll
@@ -7,12 +7,12 @@
 ; CHECK:       membar
 ; CHECK:       membar
 ; CHECK:       stx {{.+}}, [%o2]
-define i64 @test_atomic_i64(i64* %ptr1, i64* %ptr2, i64* %ptr3) {
+define i64 @test_atomic_i64(ptr %ptr1, ptr %ptr2, ptr %ptr3) {
 entry:
-  %0 = load atomic i64, i64* %ptr1 acquire, align 8
-  %1 = load atomic i64, i64* %ptr2 acquire, align 8
+  %0 = load atomic i64, ptr %ptr1 acquire, align 8
+  %1 = load atomic i64, ptr %ptr2 acquire, align 8
   %2 = add i64 %0, %1
-  store atomic i64 %2, i64* %ptr3 release, align 8
+  store atomic i64 %2, ptr %ptr3 release, align 8
   ret i64 %2
 }
 
@@ -20,9 +20,9 @@ entry:
 ; CHECK:       mov 123, [[R:%[gilo][0-7]]]
 ; CHECK:       casx [%o1], %o0, [[R]]
 
-define i64 @test_cmpxchg_i64(i64 %a, i64* %ptr) {
+define i64 @test_cmpxchg_i64(i64 %a, ptr %ptr) {
 entry:
-  %pair = cmpxchg i64* %ptr, i64 %a, i64 123 monotonic monotonic
+  %pair = cmpxchg ptr %ptr, i64 %a, i64 123 monotonic monotonic
   %b = extractvalue { i64, i1 } %pair, 0
   ret i64 %b
 }
@@ -30,9 +30,9 @@ entry:
 ; CHECK-LABEL: test_swap_i64
 ; CHECK:       casx [%o1],
 
-define i64 @test_swap_i64(i64 %a, i64* %ptr) {
+define i64 @test_swap_i64(i64 %a, ptr %ptr) {
 entry:
-  %b = atomicrmw xchg i64* %ptr, i64 42 monotonic
+  %b = atomicrmw xchg ptr %ptr, i64 42 monotonic
   ret i64 %b
 }
 
@@ -41,9 +41,9 @@ entry:
 ; CHECK: sub
 ; CHECK: casx [%o0]
 ; CHECK: membar
-define zeroext i64 @test_load_sub_64(i64* %p, i64 zeroext %v) {
+define zeroext i64 @test_load_sub_64(ptr %p, i64 zeroext %v) {
 entry:
-  %0 = atomicrmw sub i64* %p, i64 %v seq_cst
+  %0 = atomicrmw sub ptr %p, i64 %v seq_cst
   ret i64 %0
 }
 
@@ -53,8 +53,8 @@ entry:
 ; CHECK: movg %xcc
 ; CHECK: casx [%o0]
 ; CHECK: membar
-define zeroext i64 @test_load_max_64(i64* %p, i64 zeroext %v) {
+define zeroext i64 @test_load_max_64(ptr %p, i64 zeroext %v) {
 entry:
-  %0 = atomicrmw max i64* %p, i64 %v seq_cst
+  %0 = atomicrmw max ptr %p, i64 %v seq_cst
   ret i64 %0
 }

diff  --git a/llvm/test/CodeGen/SPARC/64bit.ll b/llvm/test/CodeGen/SPARC/64bit.ll
index b84f58f38a97a..7b30d7abebc08 100644
--- a/llvm/test/CodeGen/SPARC/64bit.ll
+++ b/llvm/test/CodeGen/SPARC/64bit.ll
@@ -140,21 +140,21 @@ define i64 @reg_imm_alu(i64 %x, i64 %y, i64 %z) {
 ; CHECK: stx %
 ; CHECK: ldsh [%i3]
 ; CHECK: sth %
-define i64 @loads(i64* %p, i32* %q, i32* %r, i16* %s) {
-  %a = load i64, i64* %p
+define i64 @loads(ptr %p, ptr %q, ptr %r, ptr %s) {
+  %a = load i64, ptr %p
   %ai = add i64 1, %a
-  store i64 %ai, i64* %p
-  %b = load i32, i32* %q
+  store i64 %ai, ptr %p
+  %b = load i32, ptr %q
   %b2 = zext i32 %b to i64
   %bi = trunc i64 %ai to i32
-  store i32 %bi, i32* %q
-  %c = load i32, i32* %r
+  store i32 %bi, ptr %q
+  %c = load i32, ptr %r
   %c2 = sext i32 %c to i64
-  store i64 %ai, i64* %p
-  %d = load i16, i16* %s
+  store i64 %ai, ptr %p
+  %d = load i16, ptr %s
   %d2 = sext i16 %d to i64
   %di = trunc i64 %ai to i16
-  store i16 %di, i16* %s
+  store i16 %di, ptr %s
 
   %x1 = add i64 %a, %b2
   %x2 = add i64 %c2, %d2
@@ -164,8 +164,8 @@ define i64 @loads(i64* %p, i32* %q, i32* %r, i16* %s) {
 
 ; CHECK: load_bool
 ; CHECK: ldub [%i0], %i0
-define i64 @load_bool(i1* %p) {
-  %a = load i1, i1* %p
+define i64 @load_bool(ptr %p) {
+  %a = load i1, ptr %p
   %b = zext i1 %a to i64
   ret i64 %b
 }
@@ -176,23 +176,23 @@ define i64 @load_bool(i1* %p) {
 ; CHECK: st [[R]], [%i1+-8]
 ; CHECK: sth [[R]], [%i2+40]
 ; CHECK: stb [[R]], [%i3+-20]
-define void @stores(i64* %p, i32* %q, i16* %r, i8* %s) {
-  %p1 = getelementptr i64, i64* %p, i64 1
-  %p2 = getelementptr i64, i64* %p, i64 2
-  %pv = load i64, i64* %p1
-  store i64 %pv, i64* %p2
+define void @stores(ptr %p, ptr %q, ptr %r, ptr %s) {
+  %p1 = getelementptr i64, ptr %p, i64 1
+  %p2 = getelementptr i64, ptr %p, i64 2
+  %pv = load i64, ptr %p1
+  store i64 %pv, ptr %p2
 
-  %q2 = getelementptr i32, i32* %q, i32 -2
+  %q2 = getelementptr i32, ptr %q, i32 -2
   %qv = trunc i64 %pv to i32
-  store i32 %qv, i32* %q2
+  store i32 %qv, ptr %q2
 
-  %r2 = getelementptr i16, i16* %r, i16 20
+  %r2 = getelementptr i16, ptr %r, i16 20
   %rv = trunc i64 %pv to i16
-  store i16 %rv, i16* %r2
+  store i16 %rv, ptr %r2
 
-  %s2 = getelementptr i8, i8* %s, i8 -20
+  %s2 = getelementptr i8, ptr %s, i8 -20
   %sv = trunc i64 %pv to i8
-  store i8 %sv, i8* %s2
+  store i8 %sv, ptr %s2
 
   ret void
 }
@@ -200,9 +200,9 @@ define void @stores(i64* %p, i32* %q, i16* %r, i8* %s) {
 ; CHECK: promote_shifts
 ; CHECK: ldub [%i0], [[R:%[goli][0-7]]]
 ; CHECK: sll [[R]], [[R]], %i0
-define i8 @promote_shifts(i8* %p) {
-  %L24 = load i8, i8* %p
-  %L32 = load i8, i8* %p
+define i8 @promote_shifts(ptr %p) {
+  %L24 = load i8, ptr %p
+  %L32 = load i8, ptr %p
   %B36 = shl i8 %L24, %L32
   ret i8 %B36
 }
@@ -231,12 +231,12 @@ define i64 @unsigned_divide(i64 %a, i64 %b) {
 define void @access_fi() {
 entry:
   %b = alloca [32 x i8], align 1
-  %arraydecay = getelementptr inbounds [32 x i8], [32 x i8]* %b, i64 0, i64 0
-  call void @g(i8* %arraydecay) #2
+  %arraydecay = getelementptr inbounds [32 x i8], ptr %b, i64 0, i64 0
+  call void @g(ptr %arraydecay) #2
   ret void
 }
 
-declare void @g(i8*)
+declare void @g(ptr)
 
 ; CHECK: expand_setcc
 ; CHECK: movrgz %i0, 1,
@@ -278,11 +278,11 @@ define double @bitcast_f64_i64(i64 %x) {
 ; OPT-LABEL:  store_zero:
 ; OPT:  stx %g0, [%o0]
 ; OPT:  stx %g0, [%o1+8]
-define i64 @store_zero(i64* nocapture %a, i64* nocapture %b) {
+define i64 @store_zero(ptr nocapture %a, ptr nocapture %b) {
 entry:
-  store i64 0, i64* %a, align 8
-  %0 = getelementptr inbounds i64, i64* %b, i32 1
-  store i64 0, i64* %0, align 8
+  store i64 0, ptr %a, align 8
+  %0 = getelementptr inbounds i64, ptr %b, i32 1
+  store i64 0, ptr %0, align 8
   ret i64 0
 }
 

diff  --git a/llvm/test/CodeGen/SPARC/64cond.ll b/llvm/test/CodeGen/SPARC/64cond.ll
index fa0c53f02a358..10d070055a4ec 100644
--- a/llvm/test/CodeGen/SPARC/64cond.ll
+++ b/llvm/test/CodeGen/SPARC/64cond.ll
@@ -4,13 +4,13 @@
 ; CHECK: cmpri
 ; CHECK: cmp %i1, 1
 ; CHECK: be %xcc,
-define void @cmpri(i64* %p, i64 %x) {
+define void @cmpri(ptr %p, i64 %x) {
 entry:
   %tobool = icmp eq i64 %x, 1
   br i1 %tobool, label %if.end, label %if.then
 
 if.then:
-  store i64 %x, i64* %p, align 8
+  store i64 %x, ptr %p, align 8
   br label %if.end
 
 if.end:
@@ -20,13 +20,13 @@ if.end:
 ; CHECK: cmprr
 ; CHECK: cmp %i1, %i2
 ; CHECK: bgu %xcc,
-define void @cmprr(i64* %p, i64 %x, i64 %y) {
+define void @cmprr(ptr %p, i64 %x, i64 %y) {
 entry:
   %tobool = icmp ugt i64 %x, %y
   br i1 %tobool, label %if.end, label %if.then
 
 if.then:
-  store i64 %x, i64* %p, align 8
+  store i64 %x, ptr %p, align 8
   br label %if.end
 
 if.end:

diff  --git a/llvm/test/CodeGen/SPARC/LeonCASAInstructionUT.ll b/llvm/test/CodeGen/SPARC/LeonCASAInstructionUT.ll
index 18c98091da7c8..740c17be43658 100644
--- a/llvm/test/CodeGen/SPARC/LeonCASAInstructionUT.ll
+++ b/llvm/test/CodeGen/SPARC/LeonCASAInstructionUT.ll
@@ -22,8 +22,8 @@
 ; CHECK-DAG:   mov 1, [[R0:%[a-z0-9]+]]
 ; CHECK-DAG:   mov %g0, [[R1:%[a-z0-9]+]]
 ; CHECK:       casa [{{%[a-z0-9]+}}] 10, [[R1]], [[R0]]
-define void @casa_test(i32* %ptr) {
-  %pair = cmpxchg i32* %ptr, i32 0, i32 1 monotonic monotonic
+define void @casa_test(ptr %ptr) {
+  %pair = cmpxchg ptr %ptr, i32 0, i32 1 monotonic monotonic
   %r = extractvalue { i32, i1 } %pair, 0
   %stored1  = icmp eq i32 %r, 0
 

diff  --git a/llvm/test/CodeGen/SPARC/LeonFixAllFDIVSQRTPassUT.ll b/llvm/test/CodeGen/SPARC/LeonFixAllFDIVSQRTPassUT.ll
index c4c022d9f72f2..c5cfdd8c151c2 100644
--- a/llvm/test/CodeGen/SPARC/LeonFixAllFDIVSQRTPassUT.ll
+++ b/llvm/test/CodeGen/SPARC/LeonFixAllFDIVSQRTPassUT.ll
@@ -19,10 +19,10 @@
 ; CHECK-NEXT:  nop
 ; CHECK-NEXT:  nop
 ; CHECK-NEXT:  nop
-define double @test_1(double* byval(double) %a, double* byval(double) %b) {
+define double @test_1(ptr byval(double) %a, ptr byval(double) %b) {
 entry:
-    %0 = load double, double* %a, align 8
-    %1 = load double, double* %b, align 8
+    %0 = load double, ptr %a, align 8
+    %1 = load double, ptr %b, align 8
     %res = fdiv double %0, %1
     ret double %res
 }
@@ -50,9 +50,9 @@ declare double @llvm.sqrt.f64(double) nounwind readonly
 ; CHECK-NEXT:  nop
 ; CHECK-NEXT:  nop
 ; CHECK-NEXT:  nop
-define double @test_2(double* byval(double) %a) {
+define double @test_2(ptr byval(double) %a) {
 entry:
-    %0 = load double, double* %a, align 8
+    %0 = load double, ptr %a, align 8
     %1 = call double @llvm.sqrt.f64(double %0) nounwind
     ret double %1
 }

diff  --git a/llvm/test/CodeGen/SPARC/LeonInsertNOPLoadPassUT.ll b/llvm/test/CodeGen/SPARC/LeonInsertNOPLoadPassUT.ll
index 5ee35e68ba818..7027d862f1340 100644
--- a/llvm/test/CodeGen/SPARC/LeonInsertNOPLoadPassUT.ll
+++ b/llvm/test/CodeGen/SPARC/LeonInsertNOPLoadPassUT.ll
@@ -20,12 +20,12 @@ define float @ld_float_test() #0 {
 ; CHECK-NEXT:    add %sp, 96, %sp
 entry:
   %f = alloca float, align 4
-  store float 0x3FF3C08320000000, float* %f, align 4
-  %0 = load float, float* %f, align 4
+  store float 0x3FF3C08320000000, ptr %f, align 4
+  %0 = load float, ptr %f, align 4
   ret float %0
 }
 
-define i32 @ld_i32_test(i32 *%p) {
+define i32 @ld_i32_test(ptr %p) {
 ; CHECK-LABEL: ld_i32_test:
 ; CHECK:         .cfi_startproc
 ; CHECK-NEXT:  ! %bb.0:
@@ -33,6 +33,6 @@ define i32 @ld_i32_test(i32 *%p) {
 ; CHECK-NEXT:    nop
 ; CHECK-NEXT:    retl
 ; CHECK-NEXT:    nop
-  %res = load i32, i32* %p
+  %res = load i32, ptr %p
   ret i32 %res
 }

diff  --git a/llvm/test/CodeGen/SPARC/LeonItinerariesUT.ll b/llvm/test/CodeGen/SPARC/LeonItinerariesUT.ll
index 796dcf2e145c8..01916a2a3c594 100644
--- a/llvm/test/CodeGen/SPARC/LeonItinerariesUT.ll
+++ b/llvm/test/CodeGen/SPARC/LeonItinerariesUT.ll
@@ -36,12 +36,12 @@
 ; LEON3_4_ITIN-NEXT:  retl
 ; LEON3_4_ITIN-NEXT:  fdivs
 
-define float @f32_ops(float* byval(float) %a, float* byval(float) %b, float* byval(float) %c, float* byval(float) %d) {
+define float @f32_ops(ptr byval(float) %a, ptr byval(float) %b, ptr byval(float) %c, ptr byval(float) %d) {
 entry:
-  %0 = load float, float* %a, align 8
-  %1 = load float, float* %b, align 8
-  %2 = load float, float* %c, align 8
-  %3 = load float, float* %d, align 8
+  %0 = load float, ptr %a, align 8
+  %1 = load float, ptr %b, align 8
+  %2 = load float, ptr %c, align 8
+  %3 = load float, ptr %d, align 8
   %4 = fadd float %0, %1
   %5 = fsub float %4, %2
   %6 = fmul float %5, %3

diff  --git a/llvm/test/CodeGen/SPARC/LeonSMACUMACInstructionUT.ll b/llvm/test/CodeGen/SPARC/LeonSMACUMACInstructionUT.ll
index bf47fcb910bfd..109fa8bd2bd9e 100644
--- a/llvm/test/CodeGen/SPARC/LeonSMACUMACInstructionUT.ll
+++ b/llvm/test/CodeGen/SPARC/LeonSMACUMACInstructionUT.ll
@@ -4,17 +4,17 @@
 
 ; CHECK-LABEL: smac_test:
 ; CHECK:       smac %i1, %i0, %i0
-define i32 @smac_test(i16* %a, i16* %b) {
+define i32 @smac_test(ptr %a, ptr %b) {
 entry:
 ;  %0 = tail call i32 asm sideeffect "smac $2, $1, $0", "={r2},{r3},{r4}"(i16* %a, i16* %b)
-  %0 = tail call i32 asm sideeffect "smac $2, $1, $0", "=r,rI,r"(i16* %a, i16* %b)
+  %0 = tail call i32 asm sideeffect "smac $2, $1, $0", "=r,rI,r"(ptr %a, ptr %b)
   ret i32 %0
 }
 
 ; CHECK-LABEL: umac_test:
 ; CHECK:       umac %i1, %i0, %i0
-define i32 @umac_test(i16* %a, i16* %b) {
+define i32 @umac_test(ptr %a, ptr %b) {
 entry:
-  %0 = tail call i32 asm sideeffect "umac $2, $1, $0", "=r,rI,r"(i16* %a, i16* %b)
+  %0 = tail call i32 asm sideeffect "umac $2, $1, $0", "=r,rI,r"(ptr %a, ptr %b)
   ret i32 %0
 }

diff  --git a/llvm/test/CodeGen/SPARC/atomics.ll b/llvm/test/CodeGen/SPARC/atomics.ll
index 6a9abcc32545a..d8c8879f40f13 100644
--- a/llvm/test/CodeGen/SPARC/atomics.ll
+++ b/llvm/test/CodeGen/SPARC/atomics.ll
@@ -15,12 +15,12 @@
 ; SPARC64:       membar
 ; SPARC64:       membar
 ; SPARC64:       stb {{.+}}, [%o2]
-define i8 @test_atomic_i8(i8* %ptr1, i8* %ptr2, i8* %ptr3) {
+define i8 @test_atomic_i8(ptr %ptr1, ptr %ptr2, ptr %ptr3) {
 entry:
-  %0 = load atomic i8, i8* %ptr1 acquire, align 1
-  %1 = load atomic i8, i8* %ptr2 acquire, align 1
+  %0 = load atomic i8, ptr %ptr1 acquire, align 1
+  %1 = load atomic i8, ptr %ptr2 acquire, align 1
   %2 = add i8 %0, %1
-  store atomic i8 %2, i8* %ptr3 release, align 1
+  store atomic i8 %2, ptr %ptr3 release, align 1
   ret i8 %2
 }
 
@@ -38,12 +38,12 @@ entry:
 ; SPARC64:       membar
 ; SPARC64:       membar
 ; SPARC64:       sth {{.+}}, [%o2]
-define i16 @test_atomic_i16(i16* %ptr1, i16* %ptr2, i16* %ptr3) {
+define i16 @test_atomic_i16(ptr %ptr1, ptr %ptr2, ptr %ptr3) {
 entry:
-  %0 = load atomic i16, i16* %ptr1 acquire, align 2
-  %1 = load atomic i16, i16* %ptr2 acquire, align 2
+  %0 = load atomic i16, ptr %ptr1 acquire, align 2
+  %1 = load atomic i16, ptr %ptr2 acquire, align 2
   %2 = add i16 %0, %1
-  store atomic i16 %2, i16* %ptr3 release, align 2
+  store atomic i16 %2, ptr %ptr3 release, align 2
   ret i16 %2
 }
 
@@ -61,12 +61,12 @@ entry:
 ; SPARC64:       membar
 ; SPARC64:       membar
 ; SPARC64:       st {{.+}}, [%o2]
-define i32 @test_atomic_i32(i32* %ptr1, i32* %ptr2, i32* %ptr3) {
+define i32 @test_atomic_i32(ptr %ptr1, ptr %ptr2, ptr %ptr3) {
 entry:
-  %0 = load atomic i32, i32* %ptr1 acquire, align 4
-  %1 = load atomic i32, i32* %ptr2 acquire, align 4
+  %0 = load atomic i32, ptr %ptr1 acquire, align 4
+  %1 = load atomic i32, ptr %ptr2 acquire, align 4
   %2 = add i32 %0, %1
-  store atomic i32 %2, i32* %ptr3 release, align 4
+  store atomic i32 %2, ptr %ptr3 release, align 4
   ret i32 %2
 }
 
@@ -136,9 +136,9 @@ entry:
 ; SPARC64:      [[LABEL2]]:
 ; SPARC64:       retl
 ; SPARC64:       srl %g2, %o1, %o0
-define i8 @test_cmpxchg_i8(i8 %a, i8* %ptr) {
+define i8 @test_cmpxchg_i8(i8 %a, ptr %ptr) {
 entry:
-  %pair = cmpxchg i8* %ptr, i8 %a, i8 123 monotonic monotonic
+  %pair = cmpxchg ptr %ptr, i8 %a, i8 123 monotonic monotonic
   %b = extractvalue { i8, i1 } %pair, 0
   ret i8 %b
 }
@@ -207,9 +207,9 @@ entry:
 ; SPARC64:      [[LABEL2]]:
 ; SPARC64:       retl
 ; SPARC64:       srl %g2, %o1, %o0
-define i16 @test_cmpxchg_i16(i16 %a, i16* %ptr) {
+define i16 @test_cmpxchg_i16(i16 %a, ptr %ptr) {
 entry:
-  %pair = cmpxchg i16* %ptr, i16 %a, i16 123 monotonic monotonic
+  %pair = cmpxchg ptr %ptr, i16 %a, i16 123 monotonic monotonic
   %b = extractvalue { i16, i1 } %pair, 0
   ret i16 %b
 }
@@ -220,9 +220,9 @@ entry:
 ; SPARC64-LABEL: test_cmpxchg_i32
 ; SPARC64:       mov 123, [[R:%[gilo][0-7]]]
 ; SPARC64:       cas [%o1], %o0, [[R]]
-define i32 @test_cmpxchg_i32(i32 %a, i32* %ptr) {
+define i32 @test_cmpxchg_i32(i32 %a, ptr %ptr) {
 entry:
-  %pair = cmpxchg i32* %ptr, i32 %a, i32 123 monotonic monotonic
+  %pair = cmpxchg ptr %ptr, i32 %a, i32 123 monotonic monotonic
   %b = extractvalue { i32, i1 } %pair, 0
   ret i32 %b
 }
@@ -233,9 +233,9 @@ entry:
 ; SPARC64-LABEL: test_swap_i8
 ; SPARC64:       mov 42, [[R:%[gilo][0-7]]]
 ; SPARC64:       cas
-define i8 @test_swap_i8(i8 %a, i8* %ptr) {
+define i8 @test_swap_i8(i8 %a, ptr %ptr) {
 entry:
-  %b = atomicrmw xchg i8* %ptr, i8 42 monotonic
+  %b = atomicrmw xchg ptr %ptr, i8 42 monotonic
   ret i8 %b
 }
 
@@ -245,9 +245,9 @@ entry:
 ; SPARC64-LABEL: test_swap_i16
 ; SPARC64:       mov 42, [[R:%[gilo][0-7]]]
 ; SPARC64:       cas
-define i16 @test_swap_i16(i16 %a, i16* %ptr) {
+define i16 @test_swap_i16(i16 %a, ptr %ptr) {
 entry:
-  %b = atomicrmw xchg i16* %ptr, i16 42 monotonic
+  %b = atomicrmw xchg ptr %ptr, i16 42 monotonic
   ret i16 %b
 }
 
@@ -257,9 +257,9 @@ entry:
 ; SPARC64-LABEL: test_swap_i32
 ; SPARC64:       mov 42, [[R:%[gilo][0-7]]]
 ; SPARC64:       swap [%o1], [[R]]
-define i32 @test_swap_i32(i32 %a, i32* %ptr) {
+define i32 @test_swap_i32(i32 %a, ptr %ptr) {
 entry:
-  %b = atomicrmw xchg i32* %ptr, i32 42 monotonic
+  %b = atomicrmw xchg ptr %ptr, i32 42 monotonic
   ret i32 %b
 }
 
@@ -275,9 +275,9 @@ entry:
 ; SPARC64: sub
 ; SPARC64: cas [{{%[gilo][0-7]}}]
 ; SPARC64: membar
-define zeroext i8 @test_load_sub_i8(i8* %p, i8 zeroext %v) {
+define zeroext i8 @test_load_sub_i8(ptr %p, i8 zeroext %v) {
 entry:
-  %0 = atomicrmw sub i8* %p, i8 %v seq_cst
+  %0 = atomicrmw sub ptr %p, i8 %v seq_cst
   ret i8 %0
 }
 
@@ -293,9 +293,9 @@ entry:
 ; SPARC64: sub
 ; SPARC64: cas [{{%[gilo][0-7]}}]
 ; SPARC64: membar
-define zeroext i16 @test_load_sub_i16(i16* %p, i16 zeroext %v) {
+define zeroext i16 @test_load_sub_i16(ptr %p, i16 zeroext %v) {
 entry:
-  %0 = atomicrmw sub i16* %p, i16 %v seq_cst
+  %0 = atomicrmw sub ptr %p, i16 %v seq_cst
   ret i16 %0
 }
 
@@ -311,9 +311,9 @@ entry:
 ; SPARC64: add [[U:%[gilo][0-7]]], %o1, [[V2:%[gilo][0-7]]]
 ; SPARC64: cas [%o0], [[V]], [[V2]]
 ; SPARC64: membar
-define zeroext i32 @test_load_add_i32(i32* %p, i32 zeroext %v) {
+define zeroext i32 @test_load_add_i32(ptr %p, i32 zeroext %v) {
 entry:
-  %0 = atomicrmw add i32* %p, i32 %v seq_cst
+  %0 = atomicrmw add ptr %p, i32 %v seq_cst
   ret i32 %0
 }
 
@@ -327,9 +327,9 @@ entry:
 ; SPARC64: xor
 ; SPARC64: cas [%o0]
 ; SPARC64: membar
-define zeroext i32 @test_load_xor_32(i32* %p, i32 zeroext %v) {
+define zeroext i32 @test_load_xor_32(ptr %p, i32 zeroext %v) {
 entry:
-  %0 = atomicrmw xor i32* %p, i32 %v seq_cst
+  %0 = atomicrmw xor ptr %p, i32 %v seq_cst
   ret i32 %0
 }
 
@@ -345,9 +345,9 @@ entry:
 ; SPARC64-NOT: xor
 ; SPARC64: cas [%o0]
 ; SPARC64: membar
-define zeroext i32 @test_load_and_32(i32* %p, i32 zeroext %v) {
+define zeroext i32 @test_load_and_32(ptr %p, i32 zeroext %v) {
 entry:
-  %0 = atomicrmw and i32* %p, i32 %v seq_cst
+  %0 = atomicrmw and ptr %p, i32 %v seq_cst
   ret i32 %0
 }
 
@@ -363,9 +363,9 @@ entry:
 ; SPARC64: xor
 ; SPARC64: cas [%o0]
 ; SPARC64: membar
-define zeroext i32 @test_load_nand_32(i32* %p, i32 zeroext %v) {
+define zeroext i32 @test_load_nand_32(ptr %p, i32 zeroext %v) {
 entry:
-  %0 = atomicrmw nand i32* %p, i32 %v seq_cst
+  %0 = atomicrmw nand ptr %p, i32 %v seq_cst
   ret i32 %0
 }
 
@@ -381,8 +381,8 @@ entry:
 ; SPARC64: movleu %icc
 ; SPARC64: cas [%o0]
 ; SPARC64: membar
-define zeroext i32 @test_load_umin_32(i32* %p, i32 zeroext %v) {
+define zeroext i32 @test_load_umin_32(ptr %p, i32 zeroext %v) {
 entry:
-  %0 = atomicrmw umin i32* %p, i32 %v seq_cst
+  %0 = atomicrmw umin ptr %p, i32 %v seq_cst
   ret i32 %0
 }

diff  --git a/llvm/test/CodeGen/SPARC/basictest.ll b/llvm/test/CodeGen/SPARC/basictest.ll
index 85da61abb5c51..54b4fad924211 100644
--- a/llvm/test/CodeGen/SPARC/basictest.ll
+++ b/llvm/test/CodeGen/SPARC/basictest.ll
@@ -28,11 +28,11 @@ define i32 @test2(i32 %X, i32 %Y) {
 ; CHECK-LABEL: store_zero:
 ; CHECK: st   %g0, [%o0]
 ; CHECK: st   %g0, [%o1+4]
-define i32 @store_zero(i32* %a, i32* %b) {
+define i32 @store_zero(ptr %a, ptr %b) {
 entry:
-  store i32 0, i32* %a, align 4
-  %0 = getelementptr inbounds i32, i32* %b, i32 1
-  store i32 0, i32* %0, align 4
+  store i32 0, ptr %a, align 4
+  %0 = getelementptr inbounds i32, ptr %b, i32 1
+  store i32 0, ptr %0, align 4
   ret i32 0
 }
 
@@ -88,10 +88,10 @@ define i64 @unsigned_multiply_32x32_64(i32 %a, i32 %b) {
 ; CHECK: addxcc %o2, 0, %o4
 ; CHECK: retl
 ; CHECK: std %o4, [%o1]
-define void @load_store_64bit(i64* %x, i64* %y) {
+define void @load_store_64bit(ptr %x, ptr %y) {
 entry:
-  %0 = load i64, i64* %x
+  %0 = load i64, ptr %x
   %add = add nsw i64 %0, 3
-  store i64 %add, i64* %y
+  store i64 %add, ptr %y
   ret void
 }

diff  --git a/llvm/test/CodeGen/SPARC/bigreturn.ll b/llvm/test/CodeGen/SPARC/bigreturn.ll
index 9dd7fad78567d..ef691ef025af6 100644
--- a/llvm/test/CodeGen/SPARC/bigreturn.ll
+++ b/llvm/test/CodeGen/SPARC/bigreturn.ll
@@ -5,7 +5,7 @@
 ;; Structs up to six registers in size can be returned in registers.
 ;; Note that the maximum return size and member placement is NOT
 ;; compatible with the C ABI - see SparcCallingConv.td.
-define { i32, i32 } @ret_i32_pair(i32 %a0, i32 %a1, i32* %p, i32* %q) {
+define { i32, i32 } @ret_i32_pair(i32 %a0, i32 %a1, ptr %p, ptr %q) {
 ; SPARC-LABEL: ret_i32_pair:
 ; SPARC:         .cfi_startproc
 ; SPARC-NEXT:  ! %bb.0:
@@ -33,15 +33,15 @@ define { i32, i32 } @ret_i32_pair(i32 %a0, i32 %a1, i32* %p, i32* %q) {
 ; SPARC64-NEXT:    restore
 ; SPARC64-NEXT:    retl
 ; SPARC64-NEXT:    nop
-  %r1 = load i32, i32* %p
+  %r1 = load i32, ptr %p
   %rv1 = insertvalue { i32, i32 } undef, i32 %r1, 0
-  store i32 0, i32* %p
-  %r2 = load i32, i32* %q
+  store i32 0, ptr %p
+  %r2 = load i32, ptr %q
   %rv2 = insertvalue { i32, i32 } %rv1, i32 %r2, 1
   ret { i32, i32 } %rv2
 }
 
-define void @call_ret_i32_pair(i32* %i0) {
+define void @call_ret_i32_pair(ptr %i0) {
 ; SPARC-LABEL: call_ret_i32_pair:
 ; SPARC:         .cfi_startproc
 ; SPARC-NEXT:  ! %bb.0:
@@ -72,11 +72,11 @@ define void @call_ret_i32_pair(i32* %i0) {
 ; SPARC64-NEXT:    retl
 ; SPARC64-NEXT:    nop
   %rv = call { i32, i32 } @ret_i32_pair(i32 undef, i32 undef,
-                                        i32* undef, i32* undef)
+                                        ptr undef, ptr undef)
   %e0 = extractvalue { i32, i32 } %rv, 0
-  store volatile i32 %e0, i32* %i0
+  store volatile i32 %e0, ptr %i0
   %e1 = extractvalue { i32, i32 } %rv, 1
-  store i32 %e1, i32* %i0
+  store i32 %e1, ptr %i0
   ret void
 }
 
@@ -127,7 +127,7 @@ define i32 @call_ret_i32_arr(i32 %0) {
 ;; Structs up to six registers in size can be returned in registers.
 ;; Note that the maximum return size and member placement is NOT
 ;; compatible with the C ABI - see SparcCallingConv.td.
-define { i64, i64 } @ret_i64_pair(i32 %a0, i32 %a1, i64* %p, i64* %q) {
+define { i64, i64 } @ret_i64_pair(i32 %a0, i32 %a1, ptr %p, ptr %q) {
 ; SPARC-LABEL: ret_i64_pair:
 ; SPARC:         .cfi_startproc
 ; SPARC-NEXT:  ! %bb.0:
@@ -157,15 +157,15 @@ define { i64, i64 } @ret_i64_pair(i32 %a0, i32 %a1, i64* %p, i64* %q) {
 ; SPARC64-NEXT:    restore
 ; SPARC64-NEXT:    retl
 ; SPARC64-NEXT:    nop
-  %r1 = load i64, i64* %p
+  %r1 = load i64, ptr %p
   %rv1 = insertvalue { i64, i64 } undef, i64 %r1, 0
-  store i64 0, i64* %p
-  %r2 = load i64, i64* %q
+  store i64 0, ptr %p
+  %r2 = load i64, ptr %q
   %rv2 = insertvalue { i64, i64 } %rv1, i64 %r2, 1
   ret { i64, i64 } %rv2
 }
 
-define void @call_ret_i64_pair(i64* %i0) {
+define void @call_ret_i64_pair(ptr %i0) {
 ; SPARC-LABEL: call_ret_i64_pair:
 ; SPARC:         .cfi_startproc
 ; SPARC-NEXT:  ! %bb.0:
@@ -200,11 +200,11 @@ define void @call_ret_i64_pair(i64* %i0) {
 ; SPARC64-NEXT:    retl
 ; SPARC64-NEXT:    nop
   %rv = call { i64, i64 } @ret_i64_pair(i32 undef, i32 undef,
-                                        i64* undef, i64* undef)
+                                        ptr undef, ptr undef)
   %e0 = extractvalue { i64, i64 } %rv, 0
-  store volatile i64 %e0, i64* %i0
+  store volatile i64 %e0, ptr %i0
   %e1 = extractvalue { i64, i64 } %rv, 1
-  store i64 %e1, i64* %i0
+  store i64 %e1, ptr %i0
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/SPARC/blockaddr.ll b/llvm/test/CodeGen/SPARC/blockaddr.ll
index 69d68a9c79199..eb039876caa38 100644
--- a/llvm/test/CodeGen/SPARC/blockaddr.ll
+++ b/llvm/test/CodeGen/SPARC/blockaddr.ll
@@ -8,11 +8,11 @@
 ;
 ; copied from test/CodeGen/Mips/blockaddr.ll and modified for SPARC
 ;
-@reg = common global i8* null, align 4
+@reg = common global ptr null, align 4
 
-define i8* @dummy(i8* %x) nounwind readnone noinline {
+define ptr @dummy(ptr %x) nounwind readnone noinline {
 entry:
-  ret i8* %x
+  ret ptr %x
 }
 
 ; abs32-LABEL: func_block_addr:
@@ -64,14 +64,14 @@ entry:
 
 define void @func_block_addr() nounwind {
 entry:
-  %call = tail call i8* @dummy(i8* blockaddress(@func_block_addr, %baz))
-  indirectbr i8* %call, [label %baz, label %foo]
+  %call = tail call ptr @dummy(ptr blockaddress(@func_block_addr, %baz))
+  indirectbr ptr %call, [label %baz, label %foo]
 
 foo:                                              ; preds = %foo, %entry
-  store i8* blockaddress(@func_block_addr, %foo), i8** @reg, align 4
+  store ptr blockaddress(@func_block_addr, %foo), ptr @reg, align 4
   br label %foo
 
 baz:                                              ; preds = %entry
-  store i8* null, i8** @reg, align 4
+  store ptr null, ptr @reg, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/SPARC/cast-sret-func.ll b/llvm/test/CodeGen/SPARC/cast-sret-func.ll
index 192a5ef7567ec..47e5a0aa64f4b 100644
--- a/llvm/test/CodeGen/SPARC/cast-sret-func.ll
+++ b/llvm/test/CodeGen/SPARC/cast-sret-func.ll
@@ -9,8 +9,8 @@
 define void @test() nounwind {
 entry:
   %tmp = alloca %struct, align 4
-  call void bitcast (void ()* @func to void (%struct*)*)
-    (%struct* nonnull sret(%struct) %tmp)
+  call void @func
+    (ptr nonnull sret(%struct) %tmp)
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/SPARC/constructor.ll b/llvm/test/CodeGen/SPARC/constructor.ll
index dea152dd0f089..60f1d0fac8356 100644
--- a/llvm/test/CodeGen/SPARC/constructor.ll
+++ b/llvm/test/CodeGen/SPARC/constructor.ll
@@ -1,6 +1,6 @@
 ; RUN: llc -mtriple sparc-sun-solaris2.11 -use-ctors < %s | FileCheck --check-prefix=CTOR %s
 ; RUN: llc -mtriple sparc-sun-solaris2.11 < %s | FileCheck --check-prefix=INIT-ARRAY %s
-@llvm.global_ctors = appending global [2 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 65535, void ()* @f, i8* null}, { i32, void ()*, i8* } { i32 15, void ()* @g, i8* @v }]
+@llvm.global_ctors = appending global [2 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65535, ptr @f, ptr null}, { i32, ptr, ptr } { i32 15, ptr @g, ptr @v }]
 
 @v = weak_odr global i8 0
 

diff  --git a/llvm/test/CodeGen/SPARC/exception.ll b/llvm/test/CodeGen/SPARC/exception.ll
index 7b03a64f04fc9..ea1a27930b6d7 100644
--- a/llvm/test/CodeGen/SPARC/exception.ll
+++ b/llvm/test/CodeGen/SPARC/exception.ll
@@ -5,7 +5,7 @@
 
 
 %struct.__fundamental_type_info_pseudo = type { %struct.__type_info_pseudo }
-%struct.__type_info_pseudo = type { i8*, i8* }
+%struct.__type_info_pseudo = type { ptr, ptr }
 
 @_ZTIi = external constant %struct.__fundamental_type_info_pseudo
 @_ZTIf = external constant %struct.__fundamental_type_info_pseudo
@@ -71,27 +71,27 @@
 ; V9PIC: .L_ZTIi.DW.stub:
 ; V9PIC-NEXT:   .xword _ZTIi
 
-define i32 @main(i32 %argc, i8** nocapture readnone %argv) unnamed_addr #0 personality i32 (i32, i64, i8*, i8*)* @__gxx_personality_v0 {
+define i32 @main(i32 %argc, ptr nocapture readnone %argv) unnamed_addr #0 personality ptr @__gxx_personality_v0 {
 entry:
   %0 = icmp eq i32 %argc, 2
-  %1 = tail call i8* @__cxa_allocate_exception(i32 4) #1
+  %1 = tail call ptr @__cxa_allocate_exception(i32 4) #1
   br i1 %0, label %"3", label %"4"
 
 "3":                                              ; preds = %entry
-  %2 = bitcast i8* %1 to i32*
-  store i32 0, i32* %2, align 4
-  invoke void @__cxa_throw(i8* %1, i8* bitcast (%struct.__fundamental_type_info_pseudo* @_ZTIi to i8*), void (i8*)* null) #2
+  %2 = bitcast ptr %1 to ptr
+  store i32 0, ptr %2, align 4
+  invoke void @__cxa_throw(ptr %1, ptr @_ZTIi, ptr null) #2
           to label %3 unwind label %"8"
 
 ; <label>:3                                       ; preds = %"3"
   unreachable
 
 "4":                                              ; preds = %entry
-  %4 = bitcast i8* %1 to float*
-  store float 1.000000e+00, float* %4, align 4
+  %4 = bitcast ptr %1 to ptr
+  store float 1.000000e+00, ptr %4, align 4
 
 
-  invoke void @__cxa_throw(i8* %1, i8* bitcast (%struct.__fundamental_type_info_pseudo* @_ZTIf to i8*), void (i8*)* null) #2
+  invoke void @__cxa_throw(ptr %1, ptr @_ZTIf, ptr null) #2
           to label %5 unwind label %"8"
 
 ; <label>:5                                       ; preds = %"4"
@@ -102,54 +102,54 @@ entry:
   ret i32 %6
 
 "8":                                              ; preds = %"4", %"3"
-  %exc = landingpad { i8*, i32 }
-          catch %struct.__fundamental_type_info_pseudo* @_ZTIi
-          catch %struct.__fundamental_type_info_pseudo* @_ZTIf
-  %exc_ptr12 = extractvalue { i8*, i32 } %exc, 0
-  %filter13 = extractvalue { i8*, i32 } %exc, 1
-  %typeid = tail call i32 @llvm.eh.typeid.for(i8* bitcast (%struct.__fundamental_type_info_pseudo* @_ZTIi to i8*))
+  %exc = landingpad { ptr, i32 }
+          catch ptr @_ZTIi
+          catch ptr @_ZTIf
+  %exc_ptr12 = extractvalue { ptr, i32 } %exc, 0
+  %filter13 = extractvalue { ptr, i32 } %exc, 1
+  %typeid = tail call i32 @llvm.eh.typeid.for(ptr @_ZTIi)
   %7 = icmp eq i32 %filter13, %typeid
   br i1 %7, label %"11", label %8
 
 ; <label>:8                                       ; preds = %"8"
-  %typeid8 = tail call i32 @llvm.eh.typeid.for(i8* bitcast (%struct.__fundamental_type_info_pseudo* @_ZTIf to i8*))
+  %typeid8 = tail call i32 @llvm.eh.typeid.for(ptr @_ZTIf)
   %9 = icmp eq i32 %filter13, %typeid8
   br i1 %9, label %"13", label %"9"
 
 "9":                                              ; preds = %8
-  resume { i8*, i32 } %exc
+  resume { ptr, i32 } %exc
 
 "11":                                             ; preds = %"8"
-  %10 = tail call i8* @__cxa_begin_catch(i8* %exc_ptr12) #1
-  %11 = tail call i32 @puts(i8* getelementptr inbounds ([12 x i8], [12 x i8]* @.cst, i32 0, i32 0))
+  %10 = tail call ptr @__cxa_begin_catch(ptr %exc_ptr12) #1
+  %11 = tail call i32 @puts(ptr @.cst)
   tail call void @__cxa_end_catch() #1
   br label %"5"
 
 "13":                                             ; preds = %8
-  %12 = tail call i8* @__cxa_begin_catch(i8* %exc_ptr12) #1
-  %13 = tail call i32 @puts(i8* getelementptr inbounds ([14 x i8], [14 x i8]* @.cst1, i32 0, i32 0))
+  %12 = tail call ptr @__cxa_begin_catch(ptr %exc_ptr12) #1
+  %13 = tail call i32 @puts(ptr @.cst1)
   tail call void @__cxa_end_catch() #1
   br label %"5"
 }
 
 ; Function Attrs: nounwind
-declare i8* @__cxa_allocate_exception(i32) #1
+declare ptr @__cxa_allocate_exception(i32) #1
 
 ; Function Attrs: noreturn
-declare void @__cxa_throw(i8*, i8*, void (i8*)*) #2
+declare void @__cxa_throw(ptr, ptr, ptr) #2
 
 declare void @__cxa_end_catch()
 
 ; Function Attrs: nounwind readnone
-declare i32 @llvm.eh.typeid.for(i8*) #3
+declare i32 @llvm.eh.typeid.for(ptr) #3
 
 ; Function Attrs: nounwind
-declare i8* @__cxa_begin_catch(i8*) #1
+declare ptr @__cxa_begin_catch(ptr) #1
 
 ; Function Attrs: nounwind
-declare i32 @puts(i8* nocapture readonly) #1
+declare i32 @puts(ptr nocapture readonly) #1
 
-declare i32 @__gxx_personality_v0(i32, i64, i8*, i8*)
+declare i32 @__gxx_personality_v0(i32, i64, ptr, ptr)
 
 attributes #0 = { "frame-pointer"="none" }
 attributes #1 = { nounwind }

diff  --git a/llvm/test/CodeGen/SPARC/fail-alloca-align.ll b/llvm/test/CodeGen/SPARC/fail-alloca-align.ll
index 062e3a4489f22..e2dc235389b1d 100644
--- a/llvm/test/CodeGen/SPARC/fail-alloca-align.ll
+++ b/llvm/test/CodeGen/SPARC/fail-alloca-align.ll
@@ -9,15 +9,15 @@
 define void @variable_alloca_with_overalignment(i32 %num) {
   %aligned = alloca i32, align 64
   %var_size = alloca i8, i32 %num, align 4
-  call void @foo(i32* %aligned, i8* %var_size)
+  call void @foo(ptr %aligned, ptr %var_size)
   ret void
 }
 
 ;; Same but with the alloca itself overaligned
 define void @variable_alloca_with_overalignment_2(i32 %num) {
   %var_size = alloca i8, i32 %num, align 64
-  call void @foo(i32* null, i8* %var_size)
+  call void @foo(ptr null, ptr %var_size)
   ret void
 }
 
-declare void @foo(i32*, i8*);
+declare void @foo(ptr, ptr);

diff  --git a/llvm/test/CodeGen/SPARC/float.ll b/llvm/test/CodeGen/SPARC/float.ll
index 248e98549c9f7..ce4a394d516b4 100644
--- a/llvm/test/CodeGen/SPARC/float.ll
+++ b/llvm/test/CodeGen/SPARC/float.ll
@@ -91,12 +91,12 @@ entry:
 ; SPARC64:          fxtos
 ; SPARC64:          fstox
 
-define void @test_xtos_stox(i64 %a, i64* %ptr0, float* %ptr1) {
+define void @test_xtos_stox(i64 %a, ptr %ptr0, ptr %ptr1) {
 entry:
   %0 = sitofp i64 %a to float
-  store float %0, float* %ptr1, align 8
+  store float %0, ptr %ptr1, align 8
   %1 = fptosi float %0 to i64
-  store i64 %1, i64* %ptr0, align 8
+  store i64 %1, ptr %ptr0, align 8
   ret void
 }
 
@@ -112,12 +112,12 @@ entry:
 ; SPARC64:          fitos
 ; SPARC64:          fstoi
 
-define void @test_itos_stoi(i32 %a, i32* %ptr0, float* %ptr1) {
+define void @test_itos_stoi(i32 %a, ptr %ptr0, ptr %ptr1) {
 entry:
   %0 = sitofp i32 %a to float
-  store float %0, float* %ptr1, align 8
+  store float %0, ptr %ptr1, align 8
   %1 = fptosi float %0 to i32
-  store i32 %1, i32* %ptr0, align 8
+  store i32 %1, ptr %ptr0, align 8
   ret void
 }
 
@@ -134,12 +134,12 @@ entry:
 ; SPARC64:          fxtod
 ; SPARC64:          fdtox
 
-define void @test_xtod_dtox(i64 %a, i64* %ptr0, double* %ptr1) {
+define void @test_xtod_dtox(i64 %a, ptr %ptr0, ptr %ptr1) {
 entry:
   %0 = sitofp i64 %a to double
-  store double %0, double* %ptr1, align 8
+  store double %0, ptr %ptr1, align 8
   %1 = fptosi double %0 to i64
-  store i64 %1, i64* %ptr0, align 8
+  store i64 %1, ptr %ptr0, align 8
   ret void
 }
 
@@ -155,12 +155,12 @@ entry:
 ; SPARC64:          fitod
 ; SPARC64:          fdtoi
 
-define void @test_itod_dtoi(i32 %a, double %b, i32* %ptr0, double* %ptr1) {
+define void @test_itod_dtoi(i32 %a, double %b, ptr %ptr0, ptr %ptr1) {
 entry:
   %0 = sitofp i32 %a to double
-  store double %0, double* %ptr1, align 8
+  store double %0, ptr %ptr1, align 8
   %1 = fptosi double %b to i32
-  store i32 %1, i32* %ptr0, align 8
+  store i32 %1, ptr %ptr0, align 8
   ret void
 }
 
@@ -176,12 +176,12 @@ entry:
 ; SPARC64-NOT:     call __floatundisf
 ; SPARC64-NOT:     call __fixunssfdi
 
-define void @test_uxtos_stoux(i64 %a, i64* %ptr0, float* %ptr1) {
+define void @test_uxtos_stoux(i64 %a, ptr %ptr0, ptr %ptr1) {
 entry:
   %0 = uitofp i64 %a to float
-  store float %0, float* %ptr1, align 8
+  store float %0, ptr %ptr1, align 8
   %1 = fptoui float %0 to i64
-  store i64 %1, i64* %ptr0, align 8
+  store i64 %1, ptr %ptr0, align 8
   ret void
 }
 
@@ -197,12 +197,12 @@ entry:
 ; SPARC64:     fdtos
 ; SPARC64:     fstoi
 
-define void @test_utos_stou(i32 %a, i32* %ptr0, float* %ptr1) {
+define void @test_utos_stou(i32 %a, ptr %ptr0, ptr %ptr1) {
 entry:
   %0 = uitofp i32 %a to float
-  store float %0, float* %ptr1, align 8
+  store float %0, ptr %ptr1, align 8
   %1 = fptoui float %0 to i32
-  store i32 %1, i32* %ptr0, align 8
+  store i32 %1, ptr %ptr0, align 8
   ret void
 }
 
@@ -219,12 +219,12 @@ entry:
 ; SPARC64-NOT:          call __floatundidf
 ; SPARC64-NOT:          call __floatunsdfdi
 
-define void @test_uxtod_dtoux(i64 %a, i64* %ptr0, double* %ptr1) {
+define void @test_uxtod_dtoux(i64 %a, ptr %ptr0, ptr %ptr1) {
 entry:
   %0 = uitofp i64 %a to double
-  store double %0, double* %ptr1, align 8
+  store double %0, ptr %ptr1, align 8
   %1 = fptoui double %0 to i64
-  store i64 %1, i64* %ptr0, align 8
+  store i64 %1, ptr %ptr0, align 8
   ret void
 }
 
@@ -240,11 +240,11 @@ entry:
 ; SPARC64-NOT:      fitod
 ; SPARC64:          fdtoi
 
-define void @test_utod_dtou(i32 %a, double %b, i32* %ptr0, double* %ptr1) {
+define void @test_utod_dtou(i32 %a, double %b, ptr %ptr0, ptr %ptr1) {
 entry:
   %0 = uitofp i32 %a to double
-  store double %0, double* %ptr1, align 8
+  store double %0, ptr %ptr1, align 8
   %1 = fptoui double %b to i32
-  store i32 %1, i32* %ptr0, align 8
+  store i32 %1, ptr %ptr0, align 8
   ret void
 }

diff  --git a/llvm/test/CodeGen/SPARC/fp128.ll b/llvm/test/CodeGen/SPARC/fp128.ll
index 773e0698ffd3e..80f3da285e053 100644
--- a/llvm/test/CodeGen/SPARC/fp128.ll
+++ b/llvm/test/CodeGen/SPARC/fp128.ll
@@ -23,17 +23,17 @@
 ; CHECK:      std
 ; CHECK:      std
 
-define void @f128_ops(fp128* noalias sret(fp128) %scalar.result, fp128* byval(fp128) %a, fp128* byval(fp128) %b, fp128* byval(fp128) %c, fp128* byval(fp128) %d) {
+define void @f128_ops(ptr noalias sret(fp128) %scalar.result, ptr byval(fp128) %a, ptr byval(fp128) %b, ptr byval(fp128) %c, ptr byval(fp128) %d) {
 entry:
-  %0 = load fp128, fp128* %a, align 8
-  %1 = load fp128, fp128* %b, align 8
-  %2 = load fp128, fp128* %c, align 8
-  %3 = load fp128, fp128* %d, align 8
+  %0 = load fp128, ptr %a, align 8
+  %1 = load fp128, ptr %b, align 8
+  %2 = load fp128, ptr %c, align 8
+  %3 = load fp128, ptr %d, align 8
   %4 = fadd fp128 %0, %1
   %5 = fsub fp128 %4, %2
   %6 = fmul fp128 %5, %3
   %7 = fdiv fp128 %6, %4
-  store fp128 %7, fp128* %scalar.result, align 8
+  store fp128 %7, ptr %scalar.result, align 8
   ret void
 }
 
@@ -44,11 +44,11 @@ entry:
 ; CHECK-DAG:   ldd [%[[S1]]], %f{{.+}}
 ; CHECK:       jmp {{%[oi]7}}+12
 
-define void @f128_spill(fp128* noalias sret(fp128) %scalar.result, fp128* byval(fp128) %a) {
+define void @f128_spill(ptr noalias sret(fp128) %scalar.result, ptr byval(fp128) %a) {
 entry:
-  %0 = load fp128, fp128* %a, align 8
+  %0 = load fp128, ptr %a, align 8
   call void asm sideeffect "", "~{f0},~{f1},~{f2},~{f3},~{f4},~{f5},~{f6},~{f7},~{f8},~{f9},~{f10},~{f11},~{f12},~{f13},~{f14},~{f15},~{f16},~{f17},~{f18},~{f19},~{f20},~{f21},~{f22},~{f23},~{f24},~{f25},~{f26},~{f27},~{f28},~{f29},~{f30},~{f31}"()
-  store fp128 %0, fp128* %scalar.result, align 8
+  store fp128 %0, ptr %scalar.result, align 8
   ret void
 }
 
@@ -67,11 +67,11 @@ entry:
 ; CHECK-NEXT:  add %g1, %sp, %g1
 ; CHECK-NEXT:  ldd [%g1+8], %f{{.+}}
 
-define void @f128_spill_large(<251 x fp128>* noalias sret(<251 x fp128>) %scalar.result, <251 x fp128>* byval(<251 x fp128>) %a) {
+define void @f128_spill_large(ptr noalias sret(<251 x fp128>) %scalar.result, ptr byval(<251 x fp128>) %a) {
 entry:
-  %0 = load <251 x fp128>, <251 x fp128>* %a, align 8
+  %0 = load <251 x fp128>, ptr %a, align 8
   call void asm sideeffect "", "~{f0},~{f1},~{f2},~{f3},~{f4},~{f5},~{f6},~{f7},~{f8},~{f9},~{f10},~{f11},~{f12},~{f13},~{f14},~{f15},~{f16},~{f17},~{f18},~{f19},~{f20},~{f21},~{f22},~{f23},~{f24},~{f25},~{f26},~{f27},~{f28},~{f29},~{f30},~{f31}"()
-  store <251 x fp128> %0, <251 x fp128>* %scalar.result, align 8
+  store <251 x fp128> %0, ptr %scalar.result, align 8
   ret void
 }
 
@@ -80,10 +80,10 @@ entry:
 ; HARD-NEXT:  nop
 ; SOFT:       _Q_cmp
 
-define i32 @f128_compare(fp128* byval(fp128) %f0, fp128* byval(fp128) %f1, i32 %a, i32 %b) {
+define i32 @f128_compare(ptr byval(fp128) %f0, ptr byval(fp128) %f1, i32 %a, i32 %b) {
 entry:
-   %0 = load fp128, fp128* %f0, align 8
-   %1 = load fp128, fp128* %f1, align 8
+   %0 = load fp128, ptr %f0, align 8
+   %1 = load fp128, ptr %f1, align 8
    %cond = fcmp ult fp128 %0, %1
    %ret = select i1 %cond, i32 %a, i32 %b
    ret i32 %ret
@@ -95,9 +95,9 @@ entry:
 ; SOFT:       _Q_cmp
 ; SOFT:       cmp
 
-define i32 @f128_compare2(fp128* byval(fp128) %f0) {
+define i32 @f128_compare2(ptr byval(fp128) %f0) {
 entry:
-  %0 = load fp128, fp128* %f0, align 8
+  %0 = load fp128, ptr %f0, align 8
   %1 = fcmp ogt fp128 %0, 0xL00000000000000000000000000000000
   br i1 %1, label %"5", label %"7"
 
@@ -115,11 +115,11 @@ entry:
 ; BE:          fabss %f0, %f0
 ; EL:          fabss %f3, %f3
 
-define void @f128_abs(fp128* noalias sret(fp128) %scalar.result, fp128* byval(fp128) %a) {
+define void @f128_abs(ptr noalias sret(fp128) %scalar.result, ptr byval(fp128) %a) {
 entry:
-  %0 = load fp128, fp128* %a, align 8
+  %0 = load fp128, ptr %a, align 8
   %1 = tail call fp128 @llvm.fabs.f128(fp128 %0)
-  store fp128 %1, fp128* %scalar.result, align 8
+  store fp128 %1, ptr %scalar.result, align 8
   ret void
 }
 
@@ -130,10 +130,10 @@ declare fp128 @llvm.fabs.f128(fp128) nounwind readonly
 ; SOFT:       _Q_itoq
 ; SOFT:       unimp 16
 
-define void @int_to_f128(fp128* noalias sret(fp128) %scalar.result, i32 %i) {
+define void @int_to_f128(ptr noalias sret(fp128) %scalar.result, i32 %i) {
 entry:
   %0 = sitofp i32 %i to fp128
-  store fp128 %0, fp128* %scalar.result, align 8
+  store fp128 %0, ptr %scalar.result, align 8
   ret void
 }
 
@@ -145,12 +145,12 @@ entry:
 ; CHECK:       stb
 ; CHECK:       ret
 
-define void @fp128_unaligned(fp128* %a, fp128* %b, fp128* %c) {
+define void @fp128_unaligned(ptr %a, ptr %b, ptr %c) {
 entry:
-  %0 = load fp128, fp128* %a, align 1
-  %1 = load fp128, fp128* %b, align 1
+  %0 = load fp128, ptr %a, align 1
+  %1 = load fp128, ptr %b, align 1
   %2 = fadd fp128 %0, %1
-  store fp128 %2, fp128* %c, align 1
+  store fp128 %2, ptr %c, align 1
   ret void
 }
 
@@ -159,10 +159,10 @@ entry:
 ; SOFT:       _Q_utoq
 ; SOFT:       unimp 16
 
-define void @uint_to_f128(fp128* noalias sret(fp128) %scalar.result, i32 %i) {
+define void @uint_to_f128(ptr noalias sret(fp128) %scalar.result, i32 %i) {
 entry:
   %0 = uitofp i32 %i to fp128
-  store fp128 %0, fp128* %scalar.result, align 8
+  store fp128 %0, ptr %scalar.result, align 8
   ret void
 }
 
@@ -173,10 +173,10 @@ entry:
 ; SOFT:       call _Q_qtoi
 
 
-define i32 @f128_to_i32(fp128* %a, fp128* %b) {
+define i32 @f128_to_i32(ptr %a, ptr %b) {
 entry:
-  %0 = load fp128, fp128* %a, align 8
-  %1 = load fp128, fp128* %b, align 8
+  %0 = load fp128, ptr %a, align 8
+  %1 = load fp128, ptr %b, align 8
   %2 = fptoui fp128 %0 to i32
   %3 = fptosi fp128 %1 to i32
   %4 = add i32 %2, %3
@@ -195,19 +195,19 @@ entry:
 ; SOFT-DAG:      unimp 16
 ; SOFT-DAG:      call _Q_qtoi
 
-define void @test_itoq_qtoi(i64 %a, i32 %b, fp128* %c, fp128* %d, i64* %ptr0, fp128* %ptr1) {
+define void @test_itoq_qtoi(i64 %a, i32 %b, ptr %c, ptr %d, ptr %ptr0, ptr %ptr1) {
 entry:
   %0 = sitofp i64 %a to fp128
-  store  fp128 %0, fp128* %ptr1, align 8
-  %cval = load fp128, fp128* %c, align 8
+  store  fp128 %0, ptr %ptr1, align 8
+  %cval = load fp128, ptr %c, align 8
   %1 = fptosi fp128 %cval to i64
-  store  i64 %1, i64* %ptr0, align 8
+  store  i64 %1, ptr %ptr0, align 8
   %2 = sitofp i32 %b to fp128
-  store  fp128 %2, fp128* %ptr1, align 8
-  %dval = load fp128, fp128* %d, align 8
+  store  fp128 %2, ptr %ptr1, align 8
+  %dval = load fp128, ptr %d, align 8
   %3 = fptosi fp128 %dval to i32
-  %4 = bitcast i64* %ptr0 to i32*
-  store  i32 %3, i32* %4, align 8
+  %4 = bitcast ptr %ptr0 to ptr
+  store  i32 %3, ptr %4, align 8
   ret void
 }
 
@@ -220,19 +220,19 @@ entry:
 ; SOFT-DAG:      unimp 16
 ; SOFT-DAG:      call _Q_qtou
 
-define void @test_utoq_qtou(i64 %a, i32 %b, fp128* %c, fp128* %d, i64* %ptr0, fp128* %ptr1) {
+define void @test_utoq_qtou(i64 %a, i32 %b, ptr %c, ptr %d, ptr %ptr0, ptr %ptr1) {
 entry:
   %0 = uitofp i64 %a to fp128
-  store  fp128 %0, fp128* %ptr1, align 8
-  %cval = load fp128, fp128* %c, align 8
+  store  fp128 %0, ptr %ptr1, align 8
+  %cval = load fp128, ptr %c, align 8
   %1 = fptoui fp128 %cval to i64
-  store  i64 %1, i64* %ptr0, align 8
+  store  i64 %1, ptr %ptr0, align 8
   %2 = uitofp i32 %b to fp128
-  store  fp128 %2, fp128* %ptr1, align 8
-  %dval = load fp128, fp128* %d, align 8
+  store  fp128 %2, ptr %ptr1, align 8
+  %dval = load fp128, ptr %d, align 8
   %3 = fptoui fp128 %dval to i32
-  %4 = bitcast i64* %ptr0 to i32*
-  store  i32 %3, i32* %4, align 8
+  %4 = bitcast ptr %ptr0 to ptr
+  store  i32 %3, ptr %4, align 8
   ret void
 }
 
@@ -242,10 +242,10 @@ entry:
 ; BE:          fnegs %f0, %f0
 ; EL:          fnegs %f3, %f3
 
-define void @f128_neg(fp128* noalias sret(fp128) %scalar.result, fp128* byval(fp128) %a) {
+define void @f128_neg(ptr noalias sret(fp128) %scalar.result, ptr byval(fp128) %a) {
 entry:
-  %0 = load fp128, fp128* %a, align 8
+  %0 = load fp128, ptr %a, align 8
   %1 = fsub fp128 0xL00000000000000008000000000000000, %0
-  store fp128 %1, fp128* %scalar.result, align 8
+  store fp128 %1, ptr %scalar.result, align 8
   ret void
 }

diff  --git a/llvm/test/CodeGen/SPARC/fp16-promote.ll b/llvm/test/CodeGen/SPARC/fp16-promote.ll
index 6afd8f1a2896b..f09c37b790844 100644
--- a/llvm/test/CodeGen/SPARC/fp16-promote.ll
+++ b/llvm/test/CodeGen/SPARC/fp16-promote.ll
@@ -5,18 +5,18 @@
 ; RUN: llc -mtriple=sparc-linux-gnu -mattr=v9 < %s | FileCheck %s -check-prefixes=ALL,V9
 ; RUN: llc -mtriple=sparc64-unknown-linux < %s | FileCheck %s -check-prefixes=ALL,SPARC64
 
-define void @test_load_store(half* %p, half* %q) nounwind {
+define void @test_load_store(ptr %p, ptr %q) nounwind {
 ; ALL-LABEL: test_load_store:
 ; ALL:       ! %bb.0:
 ; ALL-NEXT:    lduh [%o0], %o0
 ; ALL-NEXT:    retl
 ; ALL-NEXT:    sth %o0, [%o1]
-  %a = load half, half* %p
-  store half %a, half* %q
+  %a = load half, ptr %p
+  store half %a, ptr %q
   ret void
 }
 
-define float @test_fpextend_float(half* %p) nounwind {
+define float @test_fpextend_float(ptr %p) nounwind {
 ; V8-LABEL: test_fpextend_float:
 ; V8:       ! %bb.0:
 ; V8-NEXT:    save %sp, -96, %sp
@@ -40,12 +40,12 @@ define float @test_fpextend_float(half* %p) nounwind {
 ; SPARC64-NEXT:    lduh [%i0], %o0
 ; SPARC64-NEXT:    ret
 ; SPARC64-NEXT:    restore
-  %a = load half, half* %p
+  %a = load half, ptr %p
   %r = fpext half %a to float
   ret float %r
 }
 
-define double @test_fpextend_double(half* %p) nounwind {
+define double @test_fpextend_double(ptr %p) nounwind {
 ; V8-LABEL: test_fpextend_double:
 ; V8:       ! %bb.0:
 ; V8-NEXT:    save %sp, -96, %sp
@@ -72,12 +72,12 @@ define double @test_fpextend_double(half* %p) nounwind {
 ; SPARC64-NEXT:    fstod %f0, %f0
 ; SPARC64-NEXT:    ret
 ; SPARC64-NEXT:    restore
-  %a = load half, half* %p
+  %a = load half, ptr %p
   %r = fpext half %a to double
   ret double %r
 }
 
-define void @test_fpextend_fp128(half* %p, fp128* %out) nounwind {
+define void @test_fpextend_fp128(ptr %p, ptr %out) nounwind {
 ; V8-OPT-LABEL: test_fpextend_fp128:
 ; V8-OPT:       ! %bb.0:
 ; V8-OPT-NEXT:    save %sp, -112, %sp
@@ -155,13 +155,13 @@ define void @test_fpextend_fp128(half* %p, fp128* %out) nounwind {
 ; SPARC64-NEXT:    std %f0, [%i1]
 ; SPARC64-NEXT:    ret
 ; SPARC64-NEXT:    restore
-  %a = load half, half* %p
+  %a = load half, ptr %p
   %r = fpext half %a to fp128
-  store fp128 %r, fp128* %out
+  store fp128 %r, ptr %out
   ret void
 }
 
-define void @test_fptrunc_float(float %f, half* %p) nounwind {
+define void @test_fptrunc_float(float %f, ptr %p) nounwind {
 ; V8-OPT-LABEL: test_fptrunc_float:
 ; V8-OPT:       ! %bb.0:
 ; V8-OPT-NEXT:    save %sp, -96, %sp
@@ -200,11 +200,11 @@ define void @test_fptrunc_float(float %f, half* %p) nounwind {
 ; SPARC64-NEXT:    ret
 ; SPARC64-NEXT:    restore
   %a = fptrunc float %f to half
-  store half %a, half* %p
+  store half %a, ptr %p
   ret void
 }
 
-define void @test_fptrunc_double(double %d, half* %p) nounwind {
+define void @test_fptrunc_double(double %d, ptr %p) nounwind {
 ; V8-OPT-LABEL: test_fptrunc_double:
 ; V8-OPT:       ! %bb.0:
 ; V8-OPT-NEXT:    save %sp, -112, %sp
@@ -261,11 +261,11 @@ define void @test_fptrunc_double(double %d, half* %p) nounwind {
 ; SPARC64-NEXT:    ret
 ; SPARC64-NEXT:    restore
   %a = fptrunc double %d to half
-  store half %a, half* %p
+  store half %a, ptr %p
   ret void
 }
 
-define void @test_fptrunc_fp128(fp128* %dp, half* %p) nounwind {
+define void @test_fptrunc_fp128(ptr %dp, ptr %p) nounwind {
 ; V8-OPT-LABEL: test_fptrunc_fp128:
 ; V8-OPT:       ! %bb.0:
 ; V8-OPT-NEXT:    save %sp, -104, %sp
@@ -319,13 +319,13 @@ define void @test_fptrunc_fp128(fp128* %dp, half* %p) nounwind {
 ; SPARC64-NEXT:    sth %o0, [%i1]
 ; SPARC64-NEXT:    ret
 ; SPARC64-NEXT:    restore
-  %d = load fp128, fp128* %dp
+  %d = load fp128, ptr %dp
   %a = fptrunc fp128 %d to half
-  store half %a, half* %p
+  store half %a, ptr %p
   ret void
 }
 
-define void @test_fadd(half* %p, half* %q) nounwind {
+define void @test_fadd(ptr %p, ptr %q) nounwind {
 ; V8-OPT-LABEL: test_fadd:
 ; V8-OPT:       ! %bb.0:
 ; V8-OPT-NEXT:    save %sp, -104, %sp
@@ -392,14 +392,14 @@ define void @test_fadd(half* %p, half* %q) nounwind {
 ; SPARC64-NEXT:    sth %o0, [%i0]
 ; SPARC64-NEXT:    ret
 ; SPARC64-NEXT:    restore
-  %a = load half, half* %p
-  %b = load half, half* %q
+  %a = load half, ptr %p
+  %b = load half, ptr %q
   %r = fadd half %a, %b
-  store half %r, half* %p
+  store half %r, ptr %p
   ret void
 }
 
-define void @test_fmul(half* %p, half* %q) nounwind {
+define void @test_fmul(ptr %p, ptr %q) nounwind {
 ; V8-OPT-LABEL: test_fmul:
 ; V8-OPT:       ! %bb.0:
 ; V8-OPT-NEXT:    save %sp, -104, %sp
@@ -466,9 +466,9 @@ define void @test_fmul(half* %p, half* %q) nounwind {
 ; SPARC64-NEXT:    sth %o0, [%i0]
 ; SPARC64-NEXT:    ret
 ; SPARC64-NEXT:    restore
-  %a = load half, half* %p
-  %b = load half, half* %q
+  %a = load half, ptr %p
+  %b = load half, ptr %q
   %r = fmul half %a, %b
-  store half %r, half* %p
+  store half %r, ptr %p
   ret void
 }

diff  --git a/llvm/test/CodeGen/SPARC/func-addr.ll b/llvm/test/CodeGen/SPARC/func-addr.ll
index ff224b708eab8..c4147469362fc 100644
--- a/llvm/test/CodeGen/SPARC/func-addr.ll
+++ b/llvm/test/CodeGen/SPARC/func-addr.ll
@@ -12,10 +12,10 @@ entry:
 
 define void @test() #0 {
 entry:
-  %pFunc = alloca void (...)*, align 4
-  store void (...)* bitcast (void ()* @func1 to void (...)*), void (...)** %pFunc, align 4
-  %0 = load void (...)*, void (...)** %pFunc, align 4
-  %callee.knr.cast = bitcast void (...)* %0 to void ()*
+  %pFunc = alloca ptr, align 4
+  store ptr @func1, ptr %pFunc, align 4
+  %0 = load ptr, ptr %pFunc, align 4
+  %callee.knr.cast = bitcast ptr %0 to ptr
   call void %callee.knr.cast()
 
 ; abs32-LABEL:   test

diff  --git a/llvm/test/CodeGen/SPARC/globals.ll b/llvm/test/CodeGen/SPARC/globals.ll
index 36b94c8fbfacc..a31f9b6cb4c06 100644
--- a/llvm/test/CodeGen/SPARC/globals.ll
+++ b/llvm/test/CodeGen/SPARC/globals.ll
@@ -8,7 +8,7 @@
 @G = external global i8
 
 define zeroext i8 @loadG() {
-  %tmp = load i8, i8* @G
+  %tmp = load i8, ptr @G
   ret i8 %tmp
 }
 

diff  --git a/llvm/test/CodeGen/SPARC/inlineasm-output-template.ll b/llvm/test/CodeGen/SPARC/inlineasm-output-template.ll
index 63fae9f0f38f1..4231155480131 100644
--- a/llvm/test/CodeGen/SPARC/inlineasm-output-template.ll
+++ b/llvm/test/CodeGen/SPARC/inlineasm-output-template.ll
@@ -13,7 +13,7 @@ define dso_local i32 @test_inlineasm_c_output_template0() {
 ; CHECK: !TEST baz
 @baz = internal global i32 0, align 4
 define dso_local i32 @test_inlineasm_c_output_template1() {
-  tail call void asm sideeffect "!TEST ${0:c}", "i"(i32* nonnull @baz)
+  tail call void asm sideeffect "!TEST ${0:c}", "i"(ptr nonnull @baz)
   ret i32 42
 }
 

diff  --git a/llvm/test/CodeGen/SPARC/inlineasm-v9.ll b/llvm/test/CodeGen/SPARC/inlineasm-v9.ll
index 2ff1eb7d4ec94..94dbabc842966 100644
--- a/llvm/test/CodeGen/SPARC/inlineasm-v9.ll
+++ b/llvm/test/CodeGen/SPARC/inlineasm-v9.ll
@@ -52,9 +52,9 @@ entry:
 ; Ensure that the input register value is not truncated to 32bit.
 ; CHECK-LABEL: test_constraint_input_type
 ; CHECK: ldx [%i0], %o0
-define void @test_constraint_input_type(i64* %arg1) {
+define void @test_constraint_input_type(ptr %arg1) {
 Entry:
-  %val = load i64, i64* %arg1
+  %val = load i64, ptr %arg1
   tail call void asm sideeffect "", "{o0}"(i64 %val)
   ret void
 }

diff  --git a/llvm/test/CodeGen/SPARC/inlineasm.ll b/llvm/test/CodeGen/SPARC/inlineasm.ll
index 8bf34bf1609c1..ec27598e5e83b 100644
--- a/llvm/test/CodeGen/SPARC/inlineasm.ll
+++ b/llvm/test/CodeGen/SPARC/inlineasm.ll
@@ -14,8 +14,8 @@ entry:
 @v = external global %struct.anon, align 4
 define void @test_constraints_nro() {
 entry:
-  %0 = load i32, i32* getelementptr inbounds (%struct.anon, %struct.anon* @v, i32 0, i32 0);
-  %1 = load i32, i32* getelementptr inbounds (%struct.anon, %struct.anon* @v, i32 0, i32 1);
+  %0 = load i32, ptr @v;
+  %1 = load i32, ptr getelementptr inbounds (%struct.anon, ptr @v, i32 0, i32 1);
   tail call void asm sideeffect "", "nro,nro"(i32 %0, i32 %1)
   ret void
 }
@@ -49,10 +49,10 @@ entry:
 ; CHECK-LABEL: test_constraint_reg:
 ; CHECK:       ldda [%i1] 43, %g2
 ; CHECK:       ldda [%i1] 43, %g4
-define void @test_constraint_reg(i32 %s, i32* %ptr) {
+define void @test_constraint_reg(i32 %s, ptr %ptr) {
 entry:
-  %0 = tail call i64 asm sideeffect "ldda [$1] $2, $0", "={r2},r,n"(i32* %ptr, i32 43)
-  %1 = tail call i64 asm sideeffect "ldda [$1] $2, $0", "={g4},r,n"(i32* %ptr, i32 43)
+  %0 = tail call i64 asm sideeffect "ldda [$1] $2, $0", "={r2},r,n"(ptr %ptr, i32 43)
+  %1 = tail call i64 asm sideeffect "ldda [$1] $2, $0", "={g4},r,n"(ptr %ptr, i32 43)
   ret void
 }
 
@@ -62,10 +62,10 @@ entry:
 ; CHECK: mov %i0, %i5
 ; CHECK: sra %i5, 31, %i4
 ; CHECK: std %i4, [%i1]
-define i32 @test_constraint_r_i64(i32 %foo, i64* %out, i32 %o) {
+define i32 @test_constraint_r_i64(i32 %foo, ptr %out, i32 %o) {
 entry:
   %conv = sext i32 %foo to i64
-  tail call void asm sideeffect "std $0, [$1]", "r,r,~{memory}"(i64 %conv, i64* %out)
+  tail call void asm sideeffect "std $0, [$1]", "r,r,~{memory}"(i64 %conv, ptr %out)
   ret i32 %o
 }
 
@@ -74,10 +74,10 @@ entry:
 ; CHECK: mov %i0, %i5
 ; CHECK: sra %i5, 31, %i4
 ; CHECK: std %i4, [%i1]
-define i32 @test_constraint_r_i64_noleaf(i32 %foo, i64* %out, i32 %o) #0 {
+define i32 @test_constraint_r_i64_noleaf(i32 %foo, ptr %out, i32 %o) #0 {
 entry:
   %conv = sext i32 %foo to i64
-  tail call void asm sideeffect "std $0, [$1]", "r,r,~{memory}"(i64 %conv, i64* %out)
+  tail call void asm sideeffect "std $0, [$1]", "r,r,~{memory}"(i64 %conv, ptr %out)
   ret i32 %o
 }
 attributes #0 = { "frame-pointer"="all" }
@@ -115,9 +115,9 @@ entry:
 
 ; CHECK-LABEL: test_addressing_mode_i64:
 ; CHECK: std %l0, [%i0]
-define void @test_addressing_mode_i64(i64* %out) {
+define void @test_addressing_mode_i64(ptr %out) {
 entry:
-  call void asm "std %l0, $0", "=*m,r"(i64* elementtype(i64) nonnull %out, i64 0)
+  call void asm "std %l0, $0", "=*m,r"(ptr elementtype(i64) nonnull %out, i64 0)
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/SPARC/leafproc.ll b/llvm/test/CodeGen/SPARC/leafproc.ll
index e0b097f9c83f7..4aa3fbbc3058b 100644
--- a/llvm/test/CodeGen/SPARC/leafproc.ll
+++ b/llvm/test/CodeGen/SPARC/leafproc.ll
@@ -70,12 +70,12 @@ define i32 @leaf_proc_with_local_array(i32 %a, i32 %b, i32 %c) {
 entry:
   %array = alloca [2 x i32], align 4
   %0 = sub nsw i32 %b, %c
-  %1 = getelementptr inbounds [2 x i32], [2 x i32]* %array, i32 0, i32 0
-  store i32 1, i32* %1, align 4
-  %2 = getelementptr inbounds [2 x i32], [2 x i32]* %array, i32 0, i32 1
-  store i32 2, i32* %2, align 4
-  %3 = getelementptr inbounds [2 x i32], [2 x i32]* %array, i32 0, i32 %a
-  %4 = load i32, i32* %3, align 4
+  %1 = getelementptr inbounds [2 x i32], ptr %array, i32 0, i32 0
+  store i32 1, ptr %1, align 4
+  %2 = getelementptr inbounds [2 x i32], ptr %array, i32 0, i32 1
+  store i32 2, ptr %2, align 4
+  %3 = getelementptr inbounds [2 x i32], ptr %array, i32 0, i32 %a
+  %4 = load i32, ptr %3, align 4
   ret i32 %4
 }
 

diff  --git a/llvm/test/CodeGen/SPARC/missing-sret.ll b/llvm/test/CodeGen/SPARC/missing-sret.ll
index a030096c329ce..add91227e0123 100644
--- a/llvm/test/CodeGen/SPARC/missing-sret.ll
+++ b/llvm/test/CodeGen/SPARC/missing-sret.ll
@@ -1,9 +1,9 @@
 ; RUN: llc -mtriple=sparc -filetype=obj < %s > /dev/null 2> %t2
 
-define void @mul_double_cc({ double, double }* noalias sret({ double, double }) %agg.result, double %a, double %b, double %c, double %d) {
+define void @mul_double_cc(ptr noalias sret({ double, double }) %agg.result, double %a, double %b, double %c, double %d) {
 entry:
-  call void @__muldc3({ double, double }* sret({ double, double }) %agg.result, double %a, double %b, double %c, double %d)
+  call void @__muldc3(ptr sret({ double, double }) %agg.result, double %a, double %b, double %c, double %d)
   ret void
 }
 
-declare void @__muldc3({ double, double }*, double, double, double, double)
+declare void @__muldc3(ptr, double, double, double, double)

diff  --git a/llvm/test/CodeGen/SPARC/mult-alt-generic-sparc.ll b/llvm/test/CodeGen/SPARC/mult-alt-generic-sparc.ll
index 8ee5e409f3cb9..3ce23f2ee8045 100644
--- a/llvm/test/CodeGen/SPARC/mult-alt-generic-sparc.ll
+++ b/llvm/test/CodeGen/SPARC/mult-alt-generic-sparc.ll
@@ -9,7 +9,7 @@ target triple = "sparc"
 
 define void @single_m() nounwind {
 entry:
-  call void asm "foo $1,$0", "=*m,*m"(i32* elementtype(i32) @mout0, i32* elementtype(i32) @min1) nounwind
+  call void asm "foo $1,$0", "=*m,*m"(ptr elementtype(i32) @mout0, ptr elementtype(i32) @min1) nounwind
   ret void
 }
 
@@ -17,8 +17,8 @@ define void @single_o() nounwind {
 entry:
   %out0 = alloca i32, align 4
   %index = alloca i32, align 4
-  store i32 0, i32* %out0, align 4
-  store i32 1, i32* %index, align 4
+  store i32 0, ptr %out0, align 4
+  store i32 1, ptr %index, align 4
   ret void
 }
 
@@ -31,14 +31,14 @@ define void @single_lt() nounwind {
 entry:
   %out0 = alloca i32, align 4
   %in1 = alloca i32, align 4
-  store i32 0, i32* %out0, align 4
-  store i32 1, i32* %in1, align 4
-  %tmp = load i32, i32* %in1, align 4
+  store i32 0, ptr %out0, align 4
+  store i32 1, ptr %in1, align 4
+  %tmp = load i32, ptr %in1, align 4
   %0 = call i32 asm "foo $1,$0", "=r,<r"(i32 %tmp) nounwind
-  store i32 %0, i32* %out0, align 4
-  %tmp1 = load i32, i32* %in1, align 4
+  store i32 %0, ptr %out0, align 4
+  %tmp1 = load i32, ptr %in1, align 4
   %1 = call i32 asm "foo $1,$0", "=r,r<"(i32 %tmp1) nounwind
-  store i32 %1, i32* %out0, align 4
+  store i32 %1, ptr %out0, align 4
   ret void
 }
 
@@ -46,14 +46,14 @@ define void @single_gt() nounwind {
 entry:
   %out0 = alloca i32, align 4
   %in1 = alloca i32, align 4
-  store i32 0, i32* %out0, align 4
-  store i32 1, i32* %in1, align 4
-  %tmp = load i32, i32* %in1, align 4
+  store i32 0, ptr %out0, align 4
+  store i32 1, ptr %in1, align 4
+  %tmp = load i32, ptr %in1, align 4
   %0 = call i32 asm "foo $1,$0", "=r,>r"(i32 %tmp) nounwind
-  store i32 %0, i32* %out0, align 4
-  %tmp1 = load i32, i32* %in1, align 4
+  store i32 %0, ptr %out0, align 4
+  %tmp1 = load i32, ptr %in1, align 4
   %1 = call i32 asm "foo $1,$0", "=r,r>"(i32 %tmp1) nounwind
-  store i32 %1, i32* %out0, align 4
+  store i32 %1, ptr %out0, align 4
   ret void
 }
 
@@ -61,36 +61,36 @@ define void @single_r() nounwind {
 entry:
   %out0 = alloca i32, align 4
   %in1 = alloca i32, align 4
-  store i32 0, i32* %out0, align 4
-  store i32 1, i32* %in1, align 4
-  %tmp = load i32, i32* %in1, align 4
+  store i32 0, ptr %out0, align 4
+  store i32 1, ptr %in1, align 4
+  %tmp = load i32, ptr %in1, align 4
   %0 = call i32 asm "foo $1,$0", "=r,r"(i32 %tmp) nounwind
-  store i32 %0, i32* %out0, align 4
+  store i32 %0, ptr %out0, align 4
   ret void
 }
 
 define void @single_i() nounwind {
 entry:
   %out0 = alloca i32, align 4
-  store i32 0, i32* %out0, align 4
+  store i32 0, ptr %out0, align 4
   %0 = call i32 asm "foo $1,$0", "=r,i"(i32 1) nounwind
-  store i32 %0, i32* %out0, align 4
+  store i32 %0, ptr %out0, align 4
   ret void
 }
 
 define void @single_n() nounwind {
 entry:
   %out0 = alloca i32, align 4
-  store i32 0, i32* %out0, align 4
+  store i32 0, ptr %out0, align 4
   %0 = call i32 asm "foo $1,$0", "=r,n"(i32 1) nounwind
-  store i32 %0, i32* %out0, align 4
+  store i32 %0, ptr %out0, align 4
   ret void
 }
 
 define void @single_E() nounwind {
 entry:
   %out0 = alloca double, align 8
-  store double 0.000000e+000, double* %out0, align 8
+  store double 0.000000e+000, ptr %out0, align 8
 ; No lowering support.
 ;  %0 = call double asm "foo $1,$0", "=r,E"(double 1.000000e+001) nounwind
 ;  store double %0, double* %out0, align 8
@@ -100,7 +100,7 @@ entry:
 define void @single_F() nounwind {
 entry:
   %out0 = alloca double, align 8
-  store double 0.000000e+000, double* %out0, align 8
+  store double 0.000000e+000, ptr %out0, align 8
 ; No lowering support.
 ;  %0 = call double asm "foo $1,$0", "=r,F"(double 1.000000e+000) nounwind
 ;  store double %0, double* %out0, align 8
@@ -110,7 +110,7 @@ entry:
 define void @single_s() nounwind {
 entry:
   %out0 = alloca i32, align 4
-  store i32 0, i32* %out0, align 4
+  store i32 0, ptr %out0, align 4
   ret void
 }
 
@@ -118,16 +118,16 @@ define void @single_g() nounwind {
 entry:
   %out0 = alloca i32, align 4
   %in1 = alloca i32, align 4
-  store i32 0, i32* %out0, align 4
-  store i32 1, i32* %in1, align 4
-  %tmp = load i32, i32* %in1, align 4
+  store i32 0, ptr %out0, align 4
+  store i32 1, ptr %in1, align 4
+  %tmp = load i32, ptr %in1, align 4
   %0 = call i32 asm "foo $1,$0", "=r,imr"(i32 %tmp) nounwind
-  store i32 %0, i32* %out0, align 4
-  %tmp1 = load i32, i32* @min1, align 4
+  store i32 %0, ptr %out0, align 4
+  %tmp1 = load i32, ptr @min1, align 4
   %1 = call i32 asm "foo $1,$0", "=r,imr"(i32 %tmp1) nounwind
-  store i32 %1, i32* %out0, align 4
+  store i32 %1, ptr %out0, align 4
   %2 = call i32 asm "foo $1,$0", "=r,imr"(i32 1) nounwind
-  store i32 %2, i32* %out0, align 4
+  store i32 %2, ptr %out0, align 4
   ret void
 }
 
@@ -135,18 +135,18 @@ define void @single_X() nounwind {
 entry:
   %out0 = alloca i32, align 4
   %in1 = alloca i32, align 4
-  store i32 0, i32* %out0, align 4
-  store i32 1, i32* %in1, align 4
-  %tmp = load i32, i32* %in1, align 4
+  store i32 0, ptr %out0, align 4
+  store i32 1, ptr %in1, align 4
+  %tmp = load i32, ptr %in1, align 4
   %0 = call i32 asm "foo $1,$0", "=r,X"(i32 %tmp) nounwind
-  store i32 %0, i32* %out0, align 4
-  %tmp1 = load i32, i32* @min1, align 4
+  store i32 %0, ptr %out0, align 4
+  %tmp1 = load i32, ptr @min1, align 4
   %1 = call i32 asm "foo $1,$0", "=r,X"(i32 %tmp1) nounwind
-  store i32 %1, i32* %out0, align 4
+  store i32 %1, ptr %out0, align 4
   %2 = call i32 asm "foo $1,$0", "=r,X"(i32 1) nounwind
-  store i32 %2, i32* %out0, align 4
-  %3 = call i32 asm "foo $1,$0", "=r,X"(i32* getelementptr inbounds ([2 x i32], [2 x i32]* @marray, i32 0, i32 0)) nounwind
-  store i32 %3, i32* %out0, align 4
+  store i32 %2, ptr %out0, align 4
+  %3 = call i32 asm "foo $1,$0", "=r,X"(ptr @marray) nounwind
+  store i32 %3, ptr %out0, align 4
 ; No lowering support.
 ;  %4 = call i32 asm "foo $1,$0", "=r,X"(double 1.000000e+001) nounwind
 ;  store i32 %4, i32* %out0, align 4
@@ -158,16 +158,16 @@ entry:
 define void @single_p() nounwind {
 entry:
   %out0 = alloca i32, align 4
-  store i32 0, i32* %out0, align 4
-  %0 = call i32 asm "foo $1,$0", "=r,r"(i32* getelementptr inbounds ([2 x i32], [2 x i32]* @marray, i32 0, i32 0)) nounwind
-  store i32 %0, i32* %out0, align 4
+  store i32 0, ptr %out0, align 4
+  %0 = call i32 asm "foo $1,$0", "=r,r"(ptr @marray) nounwind
+  store i32 %0, ptr %out0, align 4
   ret void
 }
 
 define void @multi_m() nounwind {
 entry:
-  %tmp = load i32, i32* @min1, align 4
-  call void asm "foo $1,$0", "=*m|r,m|r"(i32* elementtype(i32) @mout0, i32 %tmp) nounwind
+  %tmp = load i32, ptr @min1, align 4
+  call void asm "foo $1,$0", "=*m|r,m|r"(ptr elementtype(i32) @mout0, i32 %tmp) nounwind
   ret void
 }
 
@@ -175,8 +175,8 @@ define void @multi_o() nounwind {
 entry:
   %out0 = alloca i32, align 4
   %index = alloca i32, align 4
-  store i32 0, i32* %out0, align 4
-  store i32 1, i32* %index, align 4
+  store i32 0, ptr %out0, align 4
+  store i32 1, ptr %index, align 4
   ret void
 }
 
@@ -189,14 +189,14 @@ define void @multi_lt() nounwind {
 entry:
   %out0 = alloca i32, align 4
   %in1 = alloca i32, align 4
-  store i32 0, i32* %out0, align 4
-  store i32 1, i32* %in1, align 4
-  %tmp = load i32, i32* %in1, align 4
+  store i32 0, ptr %out0, align 4
+  store i32 1, ptr %in1, align 4
+  %tmp = load i32, ptr %in1, align 4
   %0 = call i32 asm "foo $1,$0", "=r|r,r|<r"(i32 %tmp) nounwind
-  store i32 %0, i32* %out0, align 4
-  %tmp1 = load i32, i32* %in1, align 4
+  store i32 %0, ptr %out0, align 4
+  %tmp1 = load i32, ptr %in1, align 4
   %1 = call i32 asm "foo $1,$0", "=r|r,r|r<"(i32 %tmp1) nounwind
-  store i32 %1, i32* %out0, align 4
+  store i32 %1, ptr %out0, align 4
   ret void
 }
 
@@ -204,14 +204,14 @@ define void @multi_gt() nounwind {
 entry:
   %out0 = alloca i32, align 4
   %in1 = alloca i32, align 4
-  store i32 0, i32* %out0, align 4
-  store i32 1, i32* %in1, align 4
-  %tmp = load i32, i32* %in1, align 4
+  store i32 0, ptr %out0, align 4
+  store i32 1, ptr %in1, align 4
+  %tmp = load i32, ptr %in1, align 4
   %0 = call i32 asm "foo $1,$0", "=r|r,r|>r"(i32 %tmp) nounwind
-  store i32 %0, i32* %out0, align 4
-  %tmp1 = load i32, i32* %in1, align 4
+  store i32 %0, ptr %out0, align 4
+  %tmp1 = load i32, ptr %in1, align 4
   %1 = call i32 asm "foo $1,$0", "=r|r,r|r>"(i32 %tmp1) nounwind
-  store i32 %1, i32* %out0, align 4
+  store i32 %1, ptr %out0, align 4
   ret void
 }
 
@@ -219,36 +219,36 @@ define void @multi_r() nounwind {
 entry:
   %out0 = alloca i32, align 4
   %in1 = alloca i32, align 4
-  store i32 0, i32* %out0, align 4
-  store i32 1, i32* %in1, align 4
-  %tmp = load i32, i32* %in1, align 4
+  store i32 0, ptr %out0, align 4
+  store i32 1, ptr %in1, align 4
+  %tmp = load i32, ptr %in1, align 4
   %0 = call i32 asm "foo $1,$0", "=r|r,r|m"(i32 %tmp) nounwind
-  store i32 %0, i32* %out0, align 4
+  store i32 %0, ptr %out0, align 4
   ret void
 }
 
 define void @multi_i() nounwind {
 entry:
   %out0 = alloca i32, align 4
-  store i32 0, i32* %out0, align 4
+  store i32 0, ptr %out0, align 4
   %0 = call i32 asm "foo $1,$0", "=r|r,r|i"(i32 1) nounwind
-  store i32 %0, i32* %out0, align 4
+  store i32 %0, ptr %out0, align 4
   ret void
 }
 
 define void @multi_n() nounwind {
 entry:
   %out0 = alloca i32, align 4
-  store i32 0, i32* %out0, align 4
+  store i32 0, ptr %out0, align 4
   %0 = call i32 asm "foo $1,$0", "=r|r,r|n"(i32 1) nounwind
-  store i32 %0, i32* %out0, align 4
+  store i32 %0, ptr %out0, align 4
   ret void
 }
 
 define void @multi_E() nounwind {
 entry:
   %out0 = alloca double, align 8
-  store double 0.000000e+000, double* %out0, align 8
+  store double 0.000000e+000, ptr %out0, align 8
 ; No lowering support.
 ;  %0 = call double asm "foo $1,$0", "=r|r,r|E"(double 1.000000e+001) nounwind
 ;  store double %0, double* %out0, align 8
@@ -258,7 +258,7 @@ entry:
 define void @multi_F() nounwind {
 entry:
   %out0 = alloca double, align 8
-  store double 0.000000e+000, double* %out0, align 8
+  store double 0.000000e+000, ptr %out0, align 8
 ; No lowering support.
 ;  %0 = call double asm "foo $1,$0", "=r|r,r|F"(double 1.000000e+000) nounwind
 ;  store double %0, double* %out0, align 8
@@ -268,7 +268,7 @@ entry:
 define void @multi_s() nounwind {
 entry:
   %out0 = alloca i32, align 4
-  store i32 0, i32* %out0, align 4
+  store i32 0, ptr %out0, align 4
   ret void
 }
 
@@ -276,16 +276,16 @@ define void @multi_g() nounwind {
 entry:
   %out0 = alloca i32, align 4
   %in1 = alloca i32, align 4
-  store i32 0, i32* %out0, align 4
-  store i32 1, i32* %in1, align 4
-  %tmp = load i32, i32* %in1, align 4
+  store i32 0, ptr %out0, align 4
+  store i32 1, ptr %in1, align 4
+  %tmp = load i32, ptr %in1, align 4
   %0 = call i32 asm "foo $1,$0", "=r|r,r|imr"(i32 %tmp) nounwind
-  store i32 %0, i32* %out0, align 4
-  %tmp1 = load i32, i32* @min1, align 4
+  store i32 %0, ptr %out0, align 4
+  %tmp1 = load i32, ptr @min1, align 4
   %1 = call i32 asm "foo $1,$0", "=r|r,r|imr"(i32 %tmp1) nounwind
-  store i32 %1, i32* %out0, align 4
+  store i32 %1, ptr %out0, align 4
   %2 = call i32 asm "foo $1,$0", "=r|r,r|imr"(i32 1) nounwind
-  store i32 %2, i32* %out0, align 4
+  store i32 %2, ptr %out0, align 4
   ret void
 }
 
@@ -293,18 +293,18 @@ define void @multi_X() nounwind {
 entry:
   %out0 = alloca i32, align 4
   %in1 = alloca i32, align 4
-  store i32 0, i32* %out0, align 4
-  store i32 1, i32* %in1, align 4
-  %tmp = load i32, i32* %in1, align 4
+  store i32 0, ptr %out0, align 4
+  store i32 1, ptr %in1, align 4
+  %tmp = load i32, ptr %in1, align 4
   %0 = call i32 asm "foo $1,$0", "=r|r,r|X"(i32 %tmp) nounwind
-  store i32 %0, i32* %out0, align 4
-  %tmp1 = load i32, i32* @min1, align 4
+  store i32 %0, ptr %out0, align 4
+  %tmp1 = load i32, ptr @min1, align 4
   %1 = call i32 asm "foo $1,$0", "=r|r,r|X"(i32 %tmp1) nounwind
-  store i32 %1, i32* %out0, align 4
+  store i32 %1, ptr %out0, align 4
   %2 = call i32 asm "foo $1,$0", "=r|r,r|X"(i32 1) nounwind
-  store i32 %2, i32* %out0, align 4
-  %3 = call i32 asm "foo $1,$0", "=r|r,r|X"(i32* getelementptr inbounds ([2 x i32], [2 x i32]* @marray, i32 0, i32 0)) nounwind
-  store i32 %3, i32* %out0, align 4
+  store i32 %2, ptr %out0, align 4
+  %3 = call i32 asm "foo $1,$0", "=r|r,r|X"(ptr @marray) nounwind
+  store i32 %3, ptr %out0, align 4
 ; No lowering support.
 ;  %4 = call i32 asm "foo $1,$0", "=r|r,r|X"(double 1.000000e+001) nounwind
 ;  store i32 %4, i32* %out0, align 4
@@ -316,8 +316,8 @@ entry:
 define void @multi_p() nounwind {
 entry:
   %out0 = alloca i32, align 4
-  store i32 0, i32* %out0, align 4
-  %0 = call i32 asm "foo $1,$0", "=r|r,r|r"(i32* getelementptr inbounds ([2 x i32], [2 x i32]* @marray, i32 0, i32 0)) nounwind
-  store i32 %0, i32* %out0, align 4
+  store i32 0, ptr %out0, align 4
+  %0 = call i32 asm "foo $1,$0", "=r|r,r|r"(ptr @marray) nounwind
+  store i32 %0, ptr %out0, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/SPARC/obj-relocs.ll b/llvm/test/CodeGen/SPARC/obj-relocs.ll
index 1d4ea1a501283..68f6550d5a184 100644
--- a/llvm/test/CodeGen/SPARC/obj-relocs.ll
+++ b/llvm/test/CodeGen/SPARC/obj-relocs.ll
@@ -27,11 +27,11 @@
 
 define i64 @foo(i64 %a) {
 entry:
-  %0 = load i64, i64* @AGlobalVar, align 4
+  %0 = load i64, ptr @AGlobalVar, align 4
   %1 = add i64 %a, %0
-  %2 = call i64 @bar(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.mystr, i32 0, i32 0), i64 %1)
+  %2 = call i64 @bar(ptr @.mystr, i64 %1)
   ret i64 %2
 }
 
 
-declare i64 @bar(i8*, i64)
+declare i64 @bar(ptr, i64)

diff  --git a/llvm/test/CodeGen/SPARC/overflow-intrinsic-optimizations.ll b/llvm/test/CodeGen/SPARC/overflow-intrinsic-optimizations.ll
index a782aa12bd322..89c0b17efc70e 100644
--- a/llvm/test/CodeGen/SPARC/overflow-intrinsic-optimizations.ll
+++ b/llvm/test/CodeGen/SPARC/overflow-intrinsic-optimizations.ll
@@ -3,26 +3,26 @@
 declare { i128, i1 } @llvm.smul.with.overflow.i128(i128, i128)
 declare { i64, i1 } @llvm.smul.with.overflow.i64(i64, i64)
 
-define i32 @mul(i128 %a, i128 %b, i128* %r) {
+define i32 @mul(i128 %a, i128 %b, ptr %r) {
 ; CHECK-LABEL: mul
 ; CHECK-NOT: call __muloti4
   %mul4 = tail call { i128, i1 } @llvm.smul.with.overflow.i128(i128 %a, i128 %b)
   %mul.val = extractvalue { i128, i1 } %mul4, 0
   %mul.ov = extractvalue { i128, i1 } %mul4, 1
   %mul.not.ov = xor i1 %mul.ov, true
-  store i128 %mul.val, i128* %r, align 16
+  store i128 %mul.val, ptr %r, align 16
   %conv = zext i1 %mul.not.ov to i32
   ret i32 %conv
 }
 
-define i32 @mul2(i64 %a, i64 %b, i64* %r) {
+define i32 @mul2(i64 %a, i64 %b, ptr %r) {
 ; CHECK-LABEL: mul2
 ; CHECK-NOT: call __mulodi4
   %mul4 = tail call { i64, i1 } @llvm.smul.with.overflow.i64(i64 %a, i64 %b)
   %mul.val = extractvalue { i64, i1 } %mul4, 0
   %mul.ov = extractvalue { i64, i1 } %mul4, 1
   %mul.not.ov = xor i1 %mul.ov, true
-  store i64 %mul.val, i64* %r, align 16
+  store i64 %mul.val, ptr %r, align 16
   %conv = zext i1 %mul.not.ov to i32
   ret i32 %conv
 }

diff  --git a/llvm/test/CodeGen/SPARC/pic.ll b/llvm/test/CodeGen/SPARC/pic.ll
index 4edbffcc12baa..1670a93cc5d39 100644
--- a/llvm/test/CodeGen/SPARC/pic.ll
+++ b/llvm/test/CodeGen/SPARC/pic.ll
@@ -5,7 +5,7 @@
 define i32 @test() nounwind {
 ; CHECK:    ld [%i0+value], %i0
 entry:
-  %0 = load i32, i32* @value
+  %0 = load i32, ptr @value
   ret i32 %0
 }
 

diff  --git a/llvm/test/CodeGen/SPARC/private.ll b/llvm/test/CodeGen/SPARC/private.ll
index 400d907e150f8..6afd237f8f1cd 100644
--- a/llvm/test/CodeGen/SPARC/private.ll
+++ b/llvm/test/CodeGen/SPARC/private.ll
@@ -11,7 +11,7 @@ define private void @foo() {
 
 define i32 @bar() {
         call void @foo()
-	%1 = load i32, i32* @baz, align 4
+	%1 = load i32, ptr @baz, align 4
         ret i32 %1
 }
 

diff  --git a/llvm/test/CodeGen/SPARC/reserved-regs.ll b/llvm/test/CodeGen/SPARC/reserved-regs.ll
index ec6290586eeef..27ebf47081351 100644
--- a/llvm/test/CodeGen/SPARC/reserved-regs.ll
+++ b/llvm/test/CodeGen/SPARC/reserved-regs.ll
@@ -19,70 +19,70 @@
 ; CHECK: ret
 define void @use_all_i32_regs() {
 entry:
-  %0 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 0), align 16
-  %1 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 1), align 4
-  %2 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 2), align 8
-  %3 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 3), align 4
-  %4 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 4), align 16
-  %5 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 5), align 4
-  %6 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 6), align 8
-  %7 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 7), align 4
-  %8 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 8), align 16
-  %9 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 9), align 4
-  %10 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 10), align 8
-  %11 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 11), align 4
-  %12 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 12), align 16
-  %13 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 13), align 4
-  %14 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 14), align 8
-  %15 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 15), align 4
-  %16 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 16), align 16
-  %17 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 17), align 4
-  %18 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 18), align 8
-  %19 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 19), align 4
-  %20 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 20), align 16
-  %21 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 21), align 4
-  %22 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 22), align 8
-  %23 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 23), align 4
-  %24 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 24), align 16
-  %25 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 25), align 4
-  %26 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 26), align 8
-  %27 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 27), align 4
-  %28 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 28), align 16
-  %29 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 29), align 4
-  %30 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 30), align 8
-  %31 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 31), align 4
-  store volatile i32 %1, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 0), align 16
-  store volatile i32 %2, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 1), align 4
-  store volatile i32 %3, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 2), align 8
-  store volatile i32 %4, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 3), align 4
-  store volatile i32 %5, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 4), align 16
-  store volatile i32 %6, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 5), align 4
-  store volatile i32 %7, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 6), align 8
-  store volatile i32 %8, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 7), align 4
-  store volatile i32 %9, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 8), align 16
-  store volatile i32 %10, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 9), align 4
-  store volatile i32 %11, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 10), align 8
-  store volatile i32 %12, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 11), align 4
-  store volatile i32 %13, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 12), align 16
-  store volatile i32 %14, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 13), align 4
-  store volatile i32 %15, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 14), align 8
-  store volatile i32 %16, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 15), align 4
-  store volatile i32 %17, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 16), align 16
-  store volatile i32 %18, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 17), align 4
-  store volatile i32 %19, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 18), align 8
-  store volatile i32 %20, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 19), align 4
-  store volatile i32 %21, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 20), align 16
-  store volatile i32 %22, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 21), align 4
-  store volatile i32 %23, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 22), align 8
-  store volatile i32 %24, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 23), align 4
-  store volatile i32 %25, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 24), align 16
-  store volatile i32 %26, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 25), align 4
-  store volatile i32 %27, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 26), align 8
-  store volatile i32 %28, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 27), align 4
-  store volatile i32 %29, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 28), align 16
-  store volatile i32 %30, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 29), align 4
-  store volatile i32 %31, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 30), align 8
-  store volatile i32 %0, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 31), align 4
+  %0 = load volatile i32, ptr @g, align 16
+  %1 = load volatile i32, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 1), align 4
+  %2 = load volatile i32, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 2), align 8
+  %3 = load volatile i32, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 3), align 4
+  %4 = load volatile i32, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 4), align 16
+  %5 = load volatile i32, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 5), align 4
+  %6 = load volatile i32, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 6), align 8
+  %7 = load volatile i32, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 7), align 4
+  %8 = load volatile i32, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 8), align 16
+  %9 = load volatile i32, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 9), align 4
+  %10 = load volatile i32, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 10), align 8
+  %11 = load volatile i32, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 11), align 4
+  %12 = load volatile i32, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 12), align 16
+  %13 = load volatile i32, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 13), align 4
+  %14 = load volatile i32, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 14), align 8
+  %15 = load volatile i32, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 15), align 4
+  %16 = load volatile i32, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 16), align 16
+  %17 = load volatile i32, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 17), align 4
+  %18 = load volatile i32, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 18), align 8
+  %19 = load volatile i32, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 19), align 4
+  %20 = load volatile i32, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 20), align 16
+  %21 = load volatile i32, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 21), align 4
+  %22 = load volatile i32, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 22), align 8
+  %23 = load volatile i32, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 23), align 4
+  %24 = load volatile i32, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 24), align 16
+  %25 = load volatile i32, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 25), align 4
+  %26 = load volatile i32, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 26), align 8
+  %27 = load volatile i32, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 27), align 4
+  %28 = load volatile i32, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 28), align 16
+  %29 = load volatile i32, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 29), align 4
+  %30 = load volatile i32, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 30), align 8
+  %31 = load volatile i32, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 31), align 4
+  store volatile i32 %1, ptr @g, align 16
+  store volatile i32 %2, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 1), align 4
+  store volatile i32 %3, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 2), align 8
+  store volatile i32 %4, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 3), align 4
+  store volatile i32 %5, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 4), align 16
+  store volatile i32 %6, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 5), align 4
+  store volatile i32 %7, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 6), align 8
+  store volatile i32 %8, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 7), align 4
+  store volatile i32 %9, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 8), align 16
+  store volatile i32 %10, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 9), align 4
+  store volatile i32 %11, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 10), align 8
+  store volatile i32 %12, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 11), align 4
+  store volatile i32 %13, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 12), align 16
+  store volatile i32 %14, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 13), align 4
+  store volatile i32 %15, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 14), align 8
+  store volatile i32 %16, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 15), align 4
+  store volatile i32 %17, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 16), align 16
+  store volatile i32 %18, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 17), align 4
+  store volatile i32 %19, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 18), align 8
+  store volatile i32 %20, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 19), align 4
+  store volatile i32 %21, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 20), align 16
+  store volatile i32 %22, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 21), align 4
+  store volatile i32 %23, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 22), align 8
+  store volatile i32 %24, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 23), align 4
+  store volatile i32 %25, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 24), align 16
+  store volatile i32 %26, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 25), align 4
+  store volatile i32 %27, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 26), align 8
+  store volatile i32 %28, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 27), align 4
+  store volatile i32 %29, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 28), align 16
+  store volatile i32 %30, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 29), align 4
+  store volatile i32 %31, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 30), align 8
+  store volatile i32 %0, ptr getelementptr inbounds ([32 x i32], ptr @g, i64 0, i64 31), align 4
   ret void
 }
 
@@ -103,37 +103,37 @@ entry:
 ; CHECK: ret
 define void @use_all_i64_regs() {
 entry:
-  %0 = load volatile i64, i64* getelementptr inbounds ([16 x i64], [16 x i64]* @h, i64 0, i64 0), align 16
-  %1 = load volatile i64, i64* getelementptr inbounds ([16 x i64], [16 x i64]* @h, i64 0, i64 1), align 4
-  %2 = load volatile i64, i64* getelementptr inbounds ([16 x i64], [16 x i64]* @h, i64 0, i64 2), align 8
-  %3 = load volatile i64, i64* getelementptr inbounds ([16 x i64], [16 x i64]* @h, i64 0, i64 3), align 4
-  %4 = load volatile i64, i64* getelementptr inbounds ([16 x i64], [16 x i64]* @h, i64 0, i64 4), align 16
-  %5 = load volatile i64, i64* getelementptr inbounds ([16 x i64], [16 x i64]* @h, i64 0, i64 5), align 4
-  %6 = load volatile i64, i64* getelementptr inbounds ([16 x i64], [16 x i64]* @h, i64 0, i64 6), align 8
-  %7 = load volatile i64, i64* getelementptr inbounds ([16 x i64], [16 x i64]* @h, i64 0, i64 7), align 4
-  %8 = load volatile i64, i64* getelementptr inbounds ([16 x i64], [16 x i64]* @h, i64 0, i64 8), align 16
-  %9 = load volatile i64, i64* getelementptr inbounds ([16 x i64], [16 x i64]* @h, i64 0, i64 9), align 4
-  %10 = load volatile i64, i64* getelementptr inbounds ([16 x i64], [16 x i64]* @h, i64 0, i64 10), align 8
-  %11 = load volatile i64, i64* getelementptr inbounds ([16 x i64], [16 x i64]* @h, i64 0, i64 11), align 4
-  %12 = load volatile i64, i64* getelementptr inbounds ([16 x i64], [16 x i64]* @h, i64 0, i64 12), align 16
-  %13 = load volatile i64, i64* getelementptr inbounds ([16 x i64], [16 x i64]* @h, i64 0, i64 13), align 4
-  %14 = load volatile i64, i64* getelementptr inbounds ([16 x i64], [16 x i64]* @h, i64 0, i64 14), align 8
-  %15 = load volatile i64, i64* getelementptr inbounds ([16 x i64], [16 x i64]* @h, i64 0, i64 15), align 4
-  store volatile i64 %1, i64* getelementptr inbounds ([16 x i64], [16 x i64]* @h, i64 0, i64 0), align 16
-  store volatile i64 %2, i64* getelementptr inbounds ([16 x i64], [16 x i64]* @h, i64 0, i64 1), align 4
-  store volatile i64 %3, i64* getelementptr inbounds ([16 x i64], [16 x i64]* @h, i64 0, i64 2), align 8
-  store volatile i64 %4, i64* getelementptr inbounds ([16 x i64], [16 x i64]* @h, i64 0, i64 3), align 4
-  store volatile i64 %5, i64* getelementptr inbounds ([16 x i64], [16 x i64]* @h, i64 0, i64 4), align 16
-  store volatile i64 %6, i64* getelementptr inbounds ([16 x i64], [16 x i64]* @h, i64 0, i64 5), align 4
-  store volatile i64 %7, i64* getelementptr inbounds ([16 x i64], [16 x i64]* @h, i64 0, i64 6), align 8
-  store volatile i64 %8, i64* getelementptr inbounds ([16 x i64], [16 x i64]* @h, i64 0, i64 7), align 4
-  store volatile i64 %9, i64* getelementptr inbounds ([16 x i64], [16 x i64]* @h, i64 0, i64 8), align 16
-  store volatile i64 %10, i64* getelementptr inbounds ([16 x i64], [16 x i64]* @h, i64 0, i64 9), align 4
-  store volatile i64 %11, i64* getelementptr inbounds ([16 x i64], [16 x i64]* @h, i64 0, i64 10), align 8
-  store volatile i64 %12, i64* getelementptr inbounds ([16 x i64], [16 x i64]* @h, i64 0, i64 11), align 4
-  store volatile i64 %13, i64* getelementptr inbounds ([16 x i64], [16 x i64]* @h, i64 0, i64 12), align 16
-  store volatile i64 %14, i64* getelementptr inbounds ([16 x i64], [16 x i64]* @h, i64 0, i64 13), align 4
-  store volatile i64 %15, i64* getelementptr inbounds ([16 x i64], [16 x i64]* @h, i64 0, i64 14), align 8
-  store volatile i64 %0, i64* getelementptr inbounds ([16 x i64], [16 x i64]* @h, i64 0, i64 15), align 4
+  %0 = load volatile i64, ptr @h, align 16
+  %1 = load volatile i64, ptr getelementptr inbounds ([16 x i64], ptr @h, i64 0, i64 1), align 4
+  %2 = load volatile i64, ptr getelementptr inbounds ([16 x i64], ptr @h, i64 0, i64 2), align 8
+  %3 = load volatile i64, ptr getelementptr inbounds ([16 x i64], ptr @h, i64 0, i64 3), align 4
+  %4 = load volatile i64, ptr getelementptr inbounds ([16 x i64], ptr @h, i64 0, i64 4), align 16
+  %5 = load volatile i64, ptr getelementptr inbounds ([16 x i64], ptr @h, i64 0, i64 5), align 4
+  %6 = load volatile i64, ptr getelementptr inbounds ([16 x i64], ptr @h, i64 0, i64 6), align 8
+  %7 = load volatile i64, ptr getelementptr inbounds ([16 x i64], ptr @h, i64 0, i64 7), align 4
+  %8 = load volatile i64, ptr getelementptr inbounds ([16 x i64], ptr @h, i64 0, i64 8), align 16
+  %9 = load volatile i64, ptr getelementptr inbounds ([16 x i64], ptr @h, i64 0, i64 9), align 4
+  %10 = load volatile i64, ptr getelementptr inbounds ([16 x i64], ptr @h, i64 0, i64 10), align 8
+  %11 = load volatile i64, ptr getelementptr inbounds ([16 x i64], ptr @h, i64 0, i64 11), align 4
+  %12 = load volatile i64, ptr getelementptr inbounds ([16 x i64], ptr @h, i64 0, i64 12), align 16
+  %13 = load volatile i64, ptr getelementptr inbounds ([16 x i64], ptr @h, i64 0, i64 13), align 4
+  %14 = load volatile i64, ptr getelementptr inbounds ([16 x i64], ptr @h, i64 0, i64 14), align 8
+  %15 = load volatile i64, ptr getelementptr inbounds ([16 x i64], ptr @h, i64 0, i64 15), align 4
+  store volatile i64 %1, ptr @h, align 16
+  store volatile i64 %2, ptr getelementptr inbounds ([16 x i64], ptr @h, i64 0, i64 1), align 4
+  store volatile i64 %3, ptr getelementptr inbounds ([16 x i64], ptr @h, i64 0, i64 2), align 8
+  store volatile i64 %4, ptr getelementptr inbounds ([16 x i64], ptr @h, i64 0, i64 3), align 4
+  store volatile i64 %5, ptr getelementptr inbounds ([16 x i64], ptr @h, i64 0, i64 4), align 16
+  store volatile i64 %6, ptr getelementptr inbounds ([16 x i64], ptr @h, i64 0, i64 5), align 4
+  store volatile i64 %7, ptr getelementptr inbounds ([16 x i64], ptr @h, i64 0, i64 6), align 8
+  store volatile i64 %8, ptr getelementptr inbounds ([16 x i64], ptr @h, i64 0, i64 7), align 4
+  store volatile i64 %9, ptr getelementptr inbounds ([16 x i64], ptr @h, i64 0, i64 8), align 16
+  store volatile i64 %10, ptr getelementptr inbounds ([16 x i64], ptr @h, i64 0, i64 9), align 4
+  store volatile i64 %11, ptr getelementptr inbounds ([16 x i64], ptr @h, i64 0, i64 10), align 8
+  store volatile i64 %12, ptr getelementptr inbounds ([16 x i64], ptr @h, i64 0, i64 11), align 4
+  store volatile i64 %13, ptr getelementptr inbounds ([16 x i64], ptr @h, i64 0, i64 12), align 16
+  store volatile i64 %14, ptr getelementptr inbounds ([16 x i64], ptr @h, i64 0, i64 13), align 4
+  store volatile i64 %15, ptr getelementptr inbounds ([16 x i64], ptr @h, i64 0, i64 14), align 8
+  store volatile i64 %0, ptr getelementptr inbounds ([16 x i64], ptr @h, i64 0, i64 15), align 4
   ret void
 }

diff --git a/llvm/test/CodeGen/SPARC/select-mask.ll b/llvm/test/CodeGen/SPARC/select-mask.ll
index 2e69a3b9be539..3929d66db343f 100644
--- a/llvm/test/CodeGen/SPARC/select-mask.ll
+++ b/llvm/test/CodeGen/SPARC/select-mask.ll
@@ -8,9 +8,9 @@
 ; CHECK: ldub [%o0], [[R:%[goli][0-7]]]
 ; CHECK: and [[R]], 1, [[V:%[goli][0-7]]]
 ; CHECK: cmp [[V]], 0
-define i32 @select_mask(i8* %this) {
+define i32 @select_mask(ptr %this) {
 entry:
-  %bf.load2 = load i8, i8* %this, align 4
+  %bf.load2 = load i8, ptr %this, align 4
   %bf.cast5 = trunc i8 %bf.load2 to i1
   %cond = select i1 %bf.cast5, i32 2, i32 0
   ret i32 %cond

diff --git a/llvm/test/CodeGen/SPARC/setjmp.ll b/llvm/test/CodeGen/SPARC/setjmp.ll
index 3f81876b3f885..e7fab9d0b8df6 100644
--- a/llvm/test/CodeGen/SPARC/setjmp.ll
+++ b/llvm/test/CodeGen/SPARC/setjmp.ll
@@ -6,7 +6,7 @@
 %struct.jmpbuf_env = type { i32, i32, [1 x %struct.__jmp_buf_tag], i32 }
 %struct.__jmp_buf_tag = type { [3 x i32], i32, %0 }
 
-@jenv = common unnamed_addr global %struct.jmpbuf_env* null
+@jenv = common unnamed_addr global ptr null
 @.cst = private unnamed_addr constant [30 x i8] c"in bar with jmp_buf's id: %d\0A\00", align 64
 
 ; CHECK-LABEL: foo
@@ -24,41 +24,41 @@
 ; V9:         st %o0, [%[[R]]+{{.+}}]
 
 ; Function Attrs: nounwind
-define i32 @foo(%struct.jmpbuf_env* byval(%struct.jmpbuf_env) %inbuf) #0 {
+define i32 @foo(ptr byval(%struct.jmpbuf_env) %inbuf) #0 {
 entry:
-  %0 = getelementptr inbounds %struct.jmpbuf_env, %struct.jmpbuf_env* %inbuf, i32 0, i32 0
-  store i32 0, i32* %0, align 4, !tbaa !4
-  %1 = getelementptr inbounds %struct.jmpbuf_env, %struct.jmpbuf_env* %inbuf, i32 0, i32 1
-  store i32 1, i32* %1, align 4, !tbaa !4
-  %2 = getelementptr inbounds %struct.jmpbuf_env, %struct.jmpbuf_env* %inbuf, i32 0, i32 2, i32 0
-  %3 = call i32 @_setjmp(%struct.__jmp_buf_tag* %2) #2
-  %4 = getelementptr inbounds %struct.jmpbuf_env, %struct.jmpbuf_env* %inbuf, i32 0, i32 3
-  store i32 %3, i32* %4, align 4, !tbaa !4
-  store %struct.jmpbuf_env* %inbuf, %struct.jmpbuf_env** @jenv, align 4, !tbaa !3
-  %5 = load i32, i32* %1, align 4, !tbaa !4
+  %0 = getelementptr inbounds %struct.jmpbuf_env, ptr %inbuf, i32 0, i32 0
+  store i32 0, ptr %0, align 4, !tbaa !4
+  %1 = getelementptr inbounds %struct.jmpbuf_env, ptr %inbuf, i32 0, i32 1
+  store i32 1, ptr %1, align 4, !tbaa !4
+  %2 = getelementptr inbounds %struct.jmpbuf_env, ptr %inbuf, i32 0, i32 2, i32 0
+  %3 = call i32 @_setjmp(ptr %2) #2
+  %4 = getelementptr inbounds %struct.jmpbuf_env, ptr %inbuf, i32 0, i32 3
+  store i32 %3, ptr %4, align 4, !tbaa !4
+  store ptr %inbuf, ptr @jenv, align 4, !tbaa !3
+  %5 = load i32, ptr %1, align 4, !tbaa !4
   %6 = icmp eq i32 %5, 1
   %7 = icmp eq i32 %3, 0
   %or.cond = and i1 %6, %7
   br i1 %or.cond, label %"4.i", label %bar.exit
 
 "4.i":                                            ; preds = %entry
-  call void @longjmp(%struct.__jmp_buf_tag* %2, i32 0) #1
+  call void @longjmp(ptr %2, i32 0) #1
   unreachable
 
 bar.exit:                                         ; preds = %entry
-  %8 = load i32, i32* %0, align 4, !tbaa !4
-  %9 = call i32 (i8*, ...) @printf(i8* noalias getelementptr inbounds ([30 x i8], [30 x i8]* @.cst, i32 0, i32 0), i32 %8) #0
+  %8 = load i32, ptr %0, align 4, !tbaa !4
+  %9 = call i32 (ptr, ...) @printf(ptr noalias @.cst, i32 %8) #0
   ret i32 0
 }
 
 ; Function Attrs: nounwind returns_twice
-declare i32 @_setjmp(%struct.__jmp_buf_tag*) #2
+declare i32 @_setjmp(ptr) #2
 
 ; Function Attrs: noreturn nounwind
-declare void @longjmp(%struct.__jmp_buf_tag*, i32) #1
+declare void @longjmp(ptr, i32) #1
 
 ; Function Attrs: nounwind
-declare i32 @printf(i8* nocapture, ...) #0
+declare i32 @printf(ptr nocapture, ...) #0
 
 
 attributes #0 = { nounwind }

diff --git a/llvm/test/CodeGen/SPARC/spillsize.ll b/llvm/test/CodeGen/SPARC/spillsize.ll
index a82e5098ffd0c..6e4060a734433 100644
--- a/llvm/test/CodeGen/SPARC/spillsize.ll
+++ b/llvm/test/CodeGen/SPARC/spillsize.ll
@@ -9,17 +9,17 @@ target triple = "sparcv9"
 ; CHECK: stx %{{..}}, [%fp+
 ; CHECK: ldx [%fp+
 ; CHECK: ldx [%fp+
-define void @spill4(i64* nocapture %p) {
+define void @spill4(ptr nocapture %p) {
 entry:
-  %val0 = load i64, i64* %p
+  %val0 = load i64, ptr %p
   %cmp0 = icmp ult i64 %val0, 385672958347594845
   %cm80 = zext i1 %cmp0 to i64
-  store i64 %cm80, i64* %p, align 8
+  store i64 %cm80, ptr %p, align 8
   tail call void asm sideeffect "", "~{i0},~{i1},~{i2},~{i3},~{i4},~{i5},~{g2},~{g3},~{g4},~{g5},~{l0},~{l1},~{l2},~{l3},~{l4},~{l5},~{l6},~{l7},~{o0},~{o1},~{o2},~{o3},~{o4},~{o5},~{o7}"()
-  %arrayidx1 = getelementptr inbounds i64, i64* %p, i64 1
-  %val = load i64, i64* %arrayidx1
+  %arrayidx1 = getelementptr inbounds i64, ptr %p, i64 1
+  %val = load i64, ptr %arrayidx1
   %cmp = icmp ult i64 %val, 385672958347594845
   %cm8 = select i1 %cmp, i64 10, i64 20
-  store i64 %cm8, i64* %arrayidx1, align 8
+  store i64 %cm8, ptr %arrayidx1, align 8
   ret void
 }

diff --git a/llvm/test/CodeGen/SPARC/sret-secondary.ll b/llvm/test/CodeGen/SPARC/sret-secondary.ll
index 84ac0b943c931..ad26cb8915fc4 100644
--- a/llvm/test/CodeGen/SPARC/sret-secondary.ll
+++ b/llvm/test/CodeGen/SPARC/sret-secondary.ll
@@ -2,7 +2,7 @@
 
 ; CHECK: sparc only supports sret on the first parameter
 
-define void @foo(i32 %a, i32* sret(i32) %out) {
-  store i32 %a, i32* %out
+define void @foo(i32 %a, ptr sret(i32) %out) {
+  store i32 %a, ptr %out
   ret void
 }

diff --git a/llvm/test/CodeGen/SPARC/stack-align.ll b/llvm/test/CodeGen/SPARC/stack-align.ll
index 6516fb78e48b1..6632237f08e27 100644
--- a/llvm/test/CodeGen/SPARC/stack-align.ll
+++ b/llvm/test/CodeGen/SPARC/stack-align.ll
@@ -1,6 +1,6 @@
 ; RUN: llc -march=sparc < %s | FileCheck %s --check-prefixes=CHECK,CHECK32
 ; RUN: llc -march=sparcv9 < %s | FileCheck %s --check-prefixes=CHECK,CHECK64
-declare void @stack_realign_helper(i32 %a, i32* %b)
+declare void @stack_realign_helper(i32 %a, ptr %b)
 
 ;; This is a function where we have a local variable of 64-byte
 ;; alignment.  We want to see that the stack is aligned (the initial
@@ -21,6 +21,6 @@ declare void @stack_realign_helper(i32 %a, i32* %b)
 define void @stack_realign(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g) {
 entry:
   %aligned = alloca i32, align 64
-  call void @stack_realign_helper(i32 %g, i32* %aligned)
+  call void @stack_realign_helper(i32 %g, ptr %aligned)
   ret void
 }

diff --git a/llvm/test/CodeGen/SPARC/stack-protector.ll b/llvm/test/CodeGen/SPARC/stack-protector.ll
index 70a73664aa16a..f23fea17bdd37 100644
--- a/llvm/test/CodeGen/SPARC/stack-protector.ll
+++ b/llvm/test/CodeGen/SPARC/stack-protector.ll
@@ -11,23 +11,23 @@
 
 @"\01LC" = internal constant [11 x i8] c"buf == %s\0A\00"		; <[11 x i8]*> [#uses=1]
 
-define void @test(i8* %a) nounwind ssp {
+define void @test(ptr %a) nounwind ssp {
 entry:
-	%a_addr = alloca i8*		; <i8**> [#uses=2]
+	%a_addr = alloca ptr		; <i8**> [#uses=2]
 	%buf = alloca [8 x i8]		; <[8 x i8]*> [#uses=2]
   %"alloca point" = bitcast i32 0 to i32		; <i32> [#uses=0]
-	store i8* %a, i8** %a_addr
-	%buf1 = bitcast [8 x i8]* %buf to i8*		; <i8*> [#uses=1]
-	%0 = load i8*, i8** %a_addr, align 4		; <i8*> [#uses=1]
-	%1 = call i8* @strcpy(i8* %buf1, i8* %0) nounwind		; <i8*> [#uses=0]
-  %buf2 = bitcast [8 x i8]* %buf to i8*		; <i8*> [#uses=1]
-	%2 = call i32 (i8*, ...) @printf(i8* getelementptr ([11 x i8], [11 x i8]* @"\01LC", i32 0, i32 0), i8* %buf2) nounwind		; <i32> [#uses=0]
+	store ptr %a, ptr %a_addr
+	%buf1 = bitcast ptr %buf to ptr		; <i8*> [#uses=1]
+	%0 = load ptr, ptr %a_addr, align 4		; <i8*> [#uses=1]
+	%1 = call ptr @strcpy(ptr %buf1, ptr %0) nounwind		; <i8*> [#uses=0]
+  %buf2 = bitcast ptr %buf to ptr		; <i8*> [#uses=1]
+	%2 = call i32 (ptr, ...) @printf(ptr @"\01LC", ptr %buf2) nounwind		; <i32> [#uses=0]
 	br label %return
 
 return:		; preds = %entry
 	ret void
 }
 
-declare i8* @strcpy(i8*, i8*) nounwind
+declare ptr @strcpy(ptr, ptr) nounwind
 
-declare i32 @printf(i8*, ...) nounwind
+declare i32 @printf(ptr, ...) nounwind

diff --git a/llvm/test/CodeGen/SPARC/tailcall.ll b/llvm/test/CodeGen/SPARC/tailcall.ll
index 9a0e7bc5c7923..45612c51ee133 100644
--- a/llvm/test/CodeGen/SPARC/tailcall.ll
+++ b/llvm/test/CodeGen/SPARC/tailcall.ll
@@ -74,7 +74,7 @@ entry:
 
 ; Perform tail call optimization for external symbol.
 
-define void @caller_extern(i8* %src) optsize #0 {
+define void @caller_extern(ptr %src) optsize #0 {
 ; V8-LABEL: caller_extern:
 ; V8:       ! %bb.0: ! %entry
 ; V8-NEXT:    sethi %hi(dest), %o1
@@ -101,16 +101,15 @@ define void @caller_extern(i8* %src) optsize #0 {
 ; V9-NEXT:    call memcpy
 ; V9-NEXT:    mov %g1, %o7
 entry:
-  tail call void @llvm.memcpy.p0i8.p0i8.i32(
-    i8* getelementptr inbounds ([2 x i8],
-    [2 x i8]* @dest, i32 0, i32 0),
-    i8* %src, i32 7, i1 false)
+  tail call void @llvm.memcpy.p0.p0.i32(
+    ptr @dest,
+    ptr %src, i32 7, i1 false)
   ret void
 }
 
 ; Perform tail call optimization for function pointer.
 
-define i32 @func_ptr_test(i32 ()* nocapture %func_ptr) #0 {
+define i32 @func_ptr_test(ptr nocapture %func_ptr) #0 {
 ; V8-LABEL: func_ptr_test:
 ; V8:       ! %bb.0: ! %entry
 ; V8-NEXT:    jmp %o0
@@ -125,7 +124,7 @@ entry:
   ret i32 %call
 }
 
-define i32 @func_ptr_test2(i32 (i32, i32, i32)* nocapture %func_ptr,
+define i32 @func_ptr_test2(ptr nocapture %func_ptr,
 ; V8-LABEL: func_ptr_test2:
 ; V8:       ! %bb.0: ! %entry
 ; V8-NEXT:    save %sp, -96, %sp
@@ -210,14 +209,14 @@ define i32 @caller_byval() #0 {
 ; V9-NEXT:    ret
 ; V9-NEXT:    restore %g0, %o0, %o0
 entry:
-  %a = alloca i32*
-  %r = tail call i32 @callee_byval(i32** byval(i32*) %a)
+  %a = alloca ptr
+  %r = tail call i32 @callee_byval(ptr byval(ptr) %a)
   ret i32 %r
 }
 
 ; Perform tail call optimization for sret function.
 
-define void @sret_test(%struct.a* noalias sret(%struct.a) %agg.result) #0 {
+define void @sret_test(ptr noalias sret(%struct.a) %agg.result) #0 {
 ; V8-LABEL: sret_test:
 ; V8:       ! %bb.0: ! %entry
 ; V8-NEXT:    mov %o7, %g1
@@ -230,8 +229,7 @@ define void @sret_test(%struct.a* noalias sret(%struct.a) %agg.result) #0 {
 ; V9-NEXT:    call sret_func
 ; V9-NEXT:    mov %g1, %o7
 entry:
-  tail call void bitcast (void (%struct.a*)* @sret_func to
-                          void (%struct.a*)*)(%struct.a* sret(%struct.a) %agg.result)
+  tail call void @sret_func(ptr sret(%struct.a) %agg.result)
   ret void
 }
 
@@ -239,7 +237,7 @@ entry:
 ; a struct and the other does not. Returning a large
 ; struct will generate a memcpy as the tail function.
 
-define void @ret_large_struct(%struct.big* noalias sret(%struct.big) %agg.result) #0 {
+define void @ret_large_struct(ptr noalias sret(%struct.big) %agg.result) #0 {
 ; V8-LABEL: ret_large_struct:
 ; V8:       ! %bb.0: ! %entry
 ; V8-NEXT:    save %sp, -96, %sp
@@ -265,8 +263,8 @@ define void @ret_large_struct(%struct.big* noalias sret(%struct.big) %agg.result
 ; V9-NEXT:    ret
 ; V9-NEXT:    restore
 entry:
-  %0 = bitcast %struct.big* %agg.result to i8*
-  tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %0, i8* align 4 bitcast (%struct.big* @bigstruct to i8*), i32 400, i1 false)
+  %0 = bitcast ptr %agg.result to ptr
+  tail call void @llvm.memcpy.p0.p0.i32(ptr align 4 %0, ptr align 4 @bigstruct, i32 400, i1 false)
   ret void
 }
 
@@ -286,7 +284,7 @@ define void @addri_test(i32 %ptr) #0 {
 ; V9-NEXT:    nop
 entry:
   %add = add nsw i32 %ptr, 4
-  %0 = inttoptr i32 %add to void ()*
+  %0 = inttoptr i32 %add to ptr
   tail call void %0() #1
   ret void
 }
@@ -297,9 +295,9 @@ entry:
 %struct.big = type { [100 x i32] }
 @bigstruct = global %struct.big zeroinitializer
 
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8*, i8*, i32, i1)
-declare void @sret_func(%struct.a* sret(%struct.a))
-declare i32 @callee_byval(i32** byval(i32*) %a)
+declare void @llvm.memcpy.p0.p0.i32(ptr, ptr, i32, i1)
+declare void @sret_func(ptr sret(%struct.a))
+declare i32 @callee_byval(ptr byval(ptr) %a)
 declare i32 @foo(i32)
 declare i32 @foo2(i32, i32)
 declare i32 @foo7(i32, i32, i32, i32, i32, i32, i32)

diff --git a/llvm/test/CodeGen/SPARC/thread-pointer.ll b/llvm/test/CodeGen/SPARC/thread-pointer.ll
index 33e99aa94747e..1ee38e7577d83 100644
--- a/llvm/test/CodeGen/SPARC/thread-pointer.ll
+++ b/llvm/test/CodeGen/SPARC/thread-pointer.ll
@@ -2,10 +2,10 @@
 ; RUN: llc < %s -mtriple=sparc64-unknown-linux-gnu | FileCheck %s
 
 ; Function Attrs: nounwind readnone
-declare i8* @llvm.thread.pointer() #1
+declare ptr @llvm.thread.pointer() #1
 
-define i8* @thread_pointer() {
+define ptr @thread_pointer() {
 ; CHECK: mov %g7, %o0
-  %1 = tail call i8* @llvm.thread.pointer()
-  ret i8* %1
+  %1 = tail call ptr @llvm.thread.pointer()
+  ret ptr %1
 }

diff --git a/llvm/test/CodeGen/SPARC/tls.ll b/llvm/test/CodeGen/SPARC/tls.ll
index da676ca84d40a..229e2ddaadcb0 100644
--- a/llvm/test/CodeGen/SPARC/tls.ll
+++ b/llvm/test/CodeGen/SPARC/tls.ll
@@ -34,9 +34,9 @@
 
 define i32 @test_tls_local() {
 entry:
-  %0 = load i32, i32* @local_symbol, align 4
+  %0 = load i32, ptr @local_symbol, align 4
   %1 = add i32 %0, 1
-  store i32 %1, i32* @local_symbol, align 4
+  store i32 %1, ptr @local_symbol, align 4
   ret i32 %1
 }
 
@@ -68,9 +68,9 @@ entry:
 
 define i32 @test_tls_extern() {
 entry:
-  %0 = load i32, i32* @extern_symbol, align 4
+  %0 = load i32, ptr @extern_symbol, align 4
   %1 = add i32 %0, 1
-  store i32 %1, i32* @extern_symbol, align 4
+  store i32 %1, ptr @extern_symbol, align 4
   ret i32 %1
 }
 

diff --git a/llvm/test/CodeGen/SPARC/varargs-v8.ll b/llvm/test/CodeGen/SPARC/varargs-v8.ll
index 2af12547b3ff7..02351cd639fc9 100644
--- a/llvm/test/CodeGen/SPARC/varargs-v8.ll
+++ b/llvm/test/CodeGen/SPARC/varargs-v8.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=sparc -disable-sparc-leaf-proc | FileCheck %s
 
-define i32 @test(i32 %a, i8* %va) nounwind {
+define i32 @test(i32 %a, ptr %va) nounwind {
 ; CHECK-LABEL: test:
 ; CHECK:       ! %bb.0: ! %entry
 ; CHECK-NEXT:    save %sp, -96, %sp
@@ -14,11 +14,11 @@ define i32 @test(i32 %a, i8* %va) nounwind {
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:    restore %i1, %i0, %o0
 entry:
-  %va.addr = alloca i8*, align 4
-  store i8* %va, i8** %va.addr, align 4
-  %0 = va_arg i8** %va.addr, i64
+  %va.addr = alloca ptr, align 4
+  store ptr %va, ptr %va.addr, align 4
+  %0 = va_arg ptr %va.addr, i64
   %conv1 = trunc i64 %0 to i32
-  %1 = va_arg i8** %va.addr, i32
+  %1 = va_arg ptr %va.addr, i32
   %add3 = add nsw i32 %1, %conv1
   ret i32 %add3
 }

diff --git a/llvm/test/CodeGen/SPARC/varargs.ll b/llvm/test/CodeGen/SPARC/varargs.ll
index 576acc284fb95..b0c05664fc4e8 100644
--- a/llvm/test/CodeGen/SPARC/varargs.ll
+++ b/llvm/test/CodeGen/SPARC/varargs.ll
@@ -14,18 +14,18 @@ target triple = "sparcv9-sun-solaris"
 ; Store the address of the ... args to %ap at %fp+BIAS+128-8
 ; add %fp, 2191, [[R:[gilo][0-7]]]
 ; stx [[R]], [%fp+2039]
-define double @varargsfunc(i8* nocapture %fmt, double %sum, ...) {
+define double @varargsfunc(ptr nocapture %fmt, double %sum, ...) {
 entry:
-  %ap = alloca i8*, align 4
-  %ap1 = bitcast i8** %ap to i8*
-  call void @llvm.va_start(i8* %ap1)
+  %ap = alloca ptr, align 4
+  %ap1 = bitcast ptr %ap to ptr
+  call void @llvm.va_start(ptr %ap1)
   br label %for.cond
 
 for.cond:
-  %fmt.addr.0 = phi i8* [ %fmt, %entry ], [ %incdec.ptr, %for.cond.backedge ]
+  %fmt.addr.0 = phi ptr [ %fmt, %entry ], [ %incdec.ptr, %for.cond.backedge ]
   %sum.addr.0 = phi double [ %sum, %entry ], [ %sum.addr.0.be, %for.cond.backedge ]
-  %incdec.ptr = getelementptr inbounds i8, i8* %fmt.addr.0, i64 1
-  %0 = load i8, i8* %fmt.addr.0, align 1
+  %incdec.ptr = getelementptr inbounds i8, ptr %fmt.addr.0, i64 1
+  %0 = load i8, ptr %fmt.addr.0, align 1
   %conv = sext i8 %0 to i32
   switch i32 %conv, label %sw.default [
     i32 105, label %sw.bb
@@ -38,7 +38,7 @@ for.cond:
 ; stx %[[AP2]], [%fp+2039]
 ; ld [%[[AP]]]
 sw.bb:
-  %1 = va_arg i8** %ap, i32
+  %1 = va_arg ptr %ap, i32
   %conv2 = sitofp i32 %1 to double
   br label %for.cond.backedge
 
@@ -48,7 +48,7 @@ sw.bb:
 ; stx %[[AP2]], [%fp+2039]
 ; ldd [%[[AP]]]
 sw.bb3:
-  %2 = va_arg i8** %ap, double
+  %2 = va_arg ptr %ap, double
   br label %for.cond.backedge
 
 for.cond.backedge:
@@ -60,7 +60,7 @@ sw.default:
   ret double %sum.addr.0
 }
 
-declare void @llvm.va_start(i8*)
+declare void @llvm.va_start(ptr)
 
 @.str = private unnamed_addr constant [4 x i8] c"abc\00", align 1
 
@@ -71,6 +71,6 @@ declare void @llvm.va_start(i8*)
 ; CHECK: , %o2
 define i32 @call_1d() #0 {
 entry:
-  %call = call double (i8*, double, ...) @varargsfunc(i8* undef, double 1.000000e+00, double 2.000000e+00)
+  %call = call double (ptr, double, ...) @varargsfunc(ptr undef, double 1.000000e+00, double 2.000000e+00)
   ret i32 1
 }

diff --git a/llvm/test/CodeGen/SPARC/vector-extract-elt.ll b/llvm/test/CodeGen/SPARC/vector-extract-elt.ll
index b76e0e9c15e43..8c3d5183b9932 100644
--- a/llvm/test/CodeGen/SPARC/vector-extract-elt.ll
+++ b/llvm/test/CodeGen/SPARC/vector-extract-elt.ll
@@ -4,11 +4,11 @@
 ; If computeKnownSignBits (in SelectionDAG) can do a simple
 ; look-thru for extractelement then we know that the add will yield a
 ; non-negative result.
-define i1 @test1(<4 x i16>* %in) {
+define i1 @test1(ptr %in) {
 ; CHECK-LABEL: ! %bb.0:
 ; CHECK-NEXT:        retl
 ; CHECK-NEXT:        mov %g0, %o0
-  %vec2 = load <4 x i16>, <4 x i16>* %in, align 1
+  %vec2 = load <4 x i16>, ptr %in, align 1
   %vec3 = lshr <4 x i16> %vec2, <i16 2, i16 2, i16 2, i16 2>
   %vec4 = sext <4 x i16> %vec3 to <4 x i32>
   %elt0 = extractelement <4 x i32> %vec4, i32 0

diff --git a/llvm/test/CodeGen/SPARC/zerostructcall.ll b/llvm/test/CodeGen/SPARC/zerostructcall.ll
index 2aa5e56fe891b..9b9976faba730 100644
--- a/llvm/test/CodeGen/SPARC/zerostructcall.ll
+++ b/llvm/test/CodeGen/SPARC/zerostructcall.ll
@@ -10,13 +10,13 @@
 define void @struct_ptr_test(i32 %i) {
 entry:
   %i.addr = alloca i32, align 4
-  store i32 %i, i32* %i.addr, align 4
-  %0 = bitcast i32* %i.addr to %struct.S*
-  call void @struct_ptr_fn(%struct.S* byval(%struct.S) align 1 %0)
+  store i32 %i, ptr %i.addr, align 4
+  %0 = bitcast ptr %i.addr to ptr
+  call void @struct_ptr_fn(ptr byval(%struct.S) align 1 %0)
   ret void
 }
 
-declare void @struct_ptr_fn(%struct.S* byval(%struct.S) align 1)
+declare void @struct_ptr_fn(ptr byval(%struct.S) align 1)
 
 ; CHECK-LABEL: struct_test
 ; CHECK:       call struct_fn
@@ -29,7 +29,7 @@ declare void @struct_ptr_fn(%struct.S* byval(%struct.S) align 1)
 
 define void @struct_test() {
 entry:
-  tail call void @struct_fn(%struct.U* byval(%struct.U) align 1 getelementptr inbounds ([1 x %struct.U], [1 x %struct.U]* @a, i32 0, i32 0))
+  tail call void @struct_fn(ptr byval(%struct.U) align 1 @a)
   ret void
 }
 
@@ -38,14 +38,14 @@ entry:
 ; CHECK-NEXT:  nop
 ; CHECK-NEXT:  ret
 
-declare void @struct_fn(%struct.U* byval(%struct.U) align 1)
+declare void @struct_fn(ptr byval(%struct.U) align 1)
 
 @b = internal global [1 x %struct.U] zeroinitializer, align 1
 
 define void @struct_arg_test() {
 entry:
-  tail call void @struct_arg_fn(%struct.U* byval(%struct.U) align 1 getelementptr inbounds ([1 x %struct.U], [1 x %struct.U]* @b, i32 0, i32 0))
+  tail call void @struct_arg_fn(ptr byval(%struct.U) align 1 @b)
   ret void
 }
 
-declare void @struct_arg_fn(%struct.U* byval(%struct.U) align 1)
+declare void @struct_arg_fn(ptr byval(%struct.U) align 1)

diff --git a/llvm/test/CodeGen/SystemZ/Large/branch-01.ll b/llvm/test/CodeGen/SystemZ/Large/branch-01.ll
index 91e00dc273c1b..482b19a17fd68 100644
--- a/llvm/test/CodeGen/SystemZ/Large/branch-01.ll
+++ b/llvm/test/CodeGen/SystemZ/Large/branch-01.ll
@@ -3480,16 +3480,16 @@ target triple = "s390x-ibm-linux"
 ; Function Attrs: nounwind
 define signext i32 @main(i32 signext %arg, ptr nocapture readonly %arg1) local_unnamed_addr #0 {
 bb:
-  %i = load i80, ptr getelementptr inbounds (<{ <{ { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } } }>, <{ { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } } }>, <{ { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } } }>, <{ { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, 
i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } } }>, <{ { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } } }>, <{ { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } } }>, <{ { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { 
i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } } }> }>, <{ <{ { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } } }>, <{ { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } } }>, <{ { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { 
i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } } }>, <{ { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } } }>, <{ { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } } }>, <{ { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, 
i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } } }>, <{ { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } } }> }>* @g_205, i64 0, i32 3, i32 2, i32 4, i32 1), align 2, !noalias !1
+  %i = load i80, ptr getelementptr inbounds (<{ <{ { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } } }>, <{ { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } } }>, <{ { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } } }>, <{ { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, 
i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } } }>, <{ { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } } }>, <{ { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } } }>, <{ { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { 
i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } } }> }>, ptr @g_205, i64 0, i32 3, i32 2, i32 4, i32 1), align 2, !noalias !1
   %i2 = lshr i80 %i, 10
   %i3 = trunc i80 %i2 to i64
   %i4 = and i64 %i3, 2
   %i5 = sub nsw i64 0, %i4
   %i6 = and i64 %i5, 46
-  %i7 = load i80, ptr getelementptr inbounds (<{ <{ { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, <{ <{ { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>* @g_278, i64 0, i32 1, i32 5, i32 0), align 2, !noalias !1
+  %i7 = load i80, ptr getelementptr inbounds (<{ <{ { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, ptr @g_278, i64 0, i32 1, i32 5, i32 0), align 2, !noalias !1
   %i8 = lshr i80 %i7, 23
   %i9 = trunc i80 %i8 to i8
-  %i10 = load i8, ptr getelementptr inbounds (<{ <{ { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } } }>, <{ { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } } }>, <{ { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } } }>, <{ { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, 
i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } } }>, <{ { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } } }>, <{ { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } } }>, <{ { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { 
i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } } }> }>, <{ <{ { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } } }>, <{ { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } } }>, <{ { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { 
i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } } }>, <{ { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } } }>, <{ { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } } }>, <{ { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, 
i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } } }>, <{ { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } } }> }>* @g_205, i64 0, i32 3, i32 2, i32 1), align 4, !tbaa !6, !noalias !14
+  %i10 = load i8, ptr getelementptr inbounds (<{ <{ { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } } }>, <{ { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } } }>, <{ { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } } }>, <{ { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, 
i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } } }>, <{ { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } } }>, <{ { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } } }>, <{ { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { 
i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } } }> }>, ptr @g_205, i64 0, i32 3, i32 2, i32 1), align 4, !tbaa !6, !noalias !14
   %i11 = lshr i80 %i7, 57
   %i12 = trunc i80 %i11 to i8
   %i13 = sdiv i8 %i12, -10
@@ -3518,10 +3518,10 @@ bb25:                                             ; preds = %bb15
   %i27 = and i16 %i26, 1
   store i16 %i27, ptr @g_129, align 2, !tbaa !20, !noalias !14
   store i32 0, ptr @g_13, align 4, !tbaa !15, !noalias !14
-  store i16 1, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1383, i64 0, i32 7), align 2, !tbaa !20, !noalias !14
+  store i16 1, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1383, i64 0, i32 7), align 2, !tbaa !20, !noalias !14
   call fastcc void @func_62(ptr noalias nonnull null, i64 1) #4, !noalias !14
   %i28 = load volatile ptr, ptr @g_1971, align 8, !tbaa !21, !noalias !14
-  store i16 -12, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2957, i64 0, i32 0), align 2, !tbaa !23, !noalias !14
+  store i16 -12, ptr @g_2957, align 2, !tbaa !23, !noalias !14
   %i29 = load volatile i32, ptr @g_6, align 4, !tbaa !15
   call fastcc void @transparent_crc(i64 undef, ptr @.str.3, i32 signext undef)
   %i30 = load i32, ptr @g_13, align 4, !tbaa !15
@@ -3530,7 +3530,7 @@ bb25:                                             ; preds = %bb15
   %i32 = load i8, ptr @g_14, align 2, !tbaa !19
   %i33 = sext i8 %i32 to i64
   call fastcc void @transparent_crc(i64 %i33, ptr @.str.5, i32 signext undef)
-  %i34 = load i32, ptr getelementptr inbounds ({ i32, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, ptr @g_31, i64 0, i32 0), align 4, !tbaa !29
+  %i34 = load i32, ptr @g_31, align 4, !tbaa !29
   %i35 = sext i32 %i34 to i64
   call fastcc void @transparent_crc(i64 %i35, ptr @.str.6, i32 signext undef)
   call fastcc void @transparent_crc(i64 undef, ptr @.str.8, i32 signext undef)
@@ -3625,7 +3625,7 @@ bb25:                                             ; preds = %bb15
   %i96 = trunc i80 %i95 to i64
   %i97 = ashr exact i64 %i96, 32
   call fastcc void @transparent_crc(i64 %i97, ptr @.str.53, i32 signext 0)
-  %i98 = getelementptr inbounds [3 x [9 x %1]], ptr bitcast (<{ <{ { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>* @g_278 to ptr), i64 0, i64 2, i64 0
+  %i98 = getelementptr inbounds [3 x [9 x %1]], ptr @g_278, i64 0, i64 2, i64 0
   %i100 = load i80, ptr %i98, align 2
   %i101 = lshr i80 %i100, 57
   %i102 = trunc i80 %i101 to i64
@@ -3739,7 +3739,7 @@ bb25:                                             ; preds = %bb15
   %i179 = trunc i80 %i178 to i64
   %i180 = ashr exact i64 %i179, 32
   call fastcc void @transparent_crc(i64 %i180, ptr @.str.110, i32 signext undef)
-  %i181 = getelementptr inbounds [9 x %2], ptr bitcast (<{ { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_695 to ptr), i64 0, i64 0
+  %i181 = getelementptr inbounds [9 x %2], ptr @g_695, i64 0, i64 0
   %i183 = load volatile i120, ptr %i181, align 1
   %i184 = load volatile i120, ptr %i181, align 1
   %i185 = lshr i120 %i184, 78
@@ -3793,7 +3793,7 @@ bb25:                                             ; preds = %bb15
   %i222 = trunc i80 %i221 to i64
   %i223 = ashr exact i64 %i222, 32
   call fastcc void @transparent_crc(i64 %i223, ptr @.str.126, i32 signext undef)
-  %i224 = getelementptr inbounds [6 x %3], ptr bitcast (<{ { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } } }>* @g_720 to ptr), i64 0, i64 0, i32 4, i32 1
+  %i224 = getelementptr inbounds [6 x %3], ptr @g_720, i64 0, i64 0, i32 4, i32 1
   %i226 = load i80, ptr %i224, align 2
   %i227 = lshr i80 %i226, 49
   %i228 = trunc i80 %i227 to i64
@@ -3801,7 +3801,7 @@ bb25:                                             ; preds = %bb15
   %i229 = load volatile i80, ptr %i224, align 2
   call fastcc void @transparent_crc(i64 0, ptr @.str.133, i32 signext 0)
   call fastcc void @transparent_crc(i64 0, ptr @.str.135, i32 signext 0)
-  %i230 = getelementptr inbounds [9 x [7 x %1]], ptr bitcast (<{ <{ { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>* @g_736 to ptr), i64 0, i64 0, i64 1
+  %i230 = getelementptr inbounds [9 x [7 x %1]], ptr @g_736, i64 0, i64 0, i64 1
   %i232 = load i80, ptr %i230, align 2
   %i233 = lshr i80 %i232, 57
   %i234 = trunc i80 %i233 to i64
@@ -4007,91 +4007,91 @@ bb25:                                             ; preds = %bb15
   %i393 = trunc i80 %i392 to i64
   %i394 = ashr exact i64 %i393, 32
   call fastcc void @transparent_crc(i64 %i394, ptr @.str.188, i32 signext undef)
-  %i395 = load volatile i80, ptr bitcast (<{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_901 to ptr), align 8
+  %i395 = load volatile i80, ptr @g_901, align 8
   %i396 = lshr i80 %i395, 57
   %i397 = trunc i80 %i396 to i64
   call fastcc void @transparent_crc(i64 %i397, ptr @.str.189, i32 signext undef)
-  %i398 = load volatile i80, ptr bitcast (<{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_901 to ptr), align 8
+  %i398 = load volatile i80, ptr @g_901, align 8
   %i399 = shl i80 %i398, 23
   %i400 = ashr i80 %i399, 64
   %i401 = shl nsw i80 %i400, 32
   %i402 = trunc i80 %i401 to i64
   %i403 = ashr exact i64 %i402, 32
   call fastcc void @transparent_crc(i64 %i403, ptr @.str.190, i32 signext undef)
-  %i404 = load volatile i80, ptr bitcast (<{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_901 to ptr), align 8
+  %i404 = load volatile i80, ptr @g_901, align 8
   %i405 = shl i80 %i404, 39
   %i406 = ashr i80 %i405, 62
   %i407 = shl nsw i80 %i406, 32
   %i408 = trunc i80 %i407 to i64
   %i409 = ashr exact i64 %i408, 32
   call fastcc void @transparent_crc(i64 %i409, ptr @.str.191, i32 signext undef)
-  %i410 = load volatile i80, ptr bitcast (<{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_901 to ptr), align 8
+  %i410 = load volatile i80, ptr @g_901, align 8
   %i411 = shl i80 %i410, 57
   %i412 = ashr i80 %i411, 58
   %i413 = shl nsw i80 %i412, 32
   %i414 = trunc i80 %i413 to i64
   %i415 = ashr exact i64 %i414, 32
   call fastcc void @transparent_crc(i64 %i415, ptr @.str.192, i32 signext undef)
-  %i416 = load i80, ptr getelementptr inbounds (<{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_901, i64 0, i32 0, i32 1), align 2
+  %i416 = load i80, ptr getelementptr inbounds (<{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, ptr @g_901, i64 0, i32 0, i32 1), align 2
   %i417 = lshr i80 %i416, 49
   %i418 = trunc i80 %i417 to i64
   call fastcc void @transparent_crc(i64 %i418, ptr @.str.193, i32 signext undef)
-  %i419 = load volatile i80, ptr getelementptr inbounds (<{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_901, i64 0, i32 0, i32 1), align 2
+  %i419 = load volatile i80, ptr getelementptr inbounds (<{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, ptr @g_901, i64 0, i32 0, i32 1), align 2
   call fastcc void @transparent_crc(i64 0, ptr @.str.194, i32 signext undef)
-  %i420 = load i80, ptr getelementptr inbounds (<{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_901, i64 0, i32 0, i32 1), align 2
+  %i420 = load i80, ptr getelementptr inbounds (<{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, ptr @g_901, i64 0, i32 0, i32 1), align 2
   %i421 = shl i80 %i420, 56
   %i422 = ashr i80 %i421, 68
   %i423 = shl nsw i80 %i422, 32
   %i424 = trunc i80 %i423 to i64
   %i425 = ashr exact i64 %i424, 32
   call fastcc void @transparent_crc(i64 %i425, ptr @.str.195, i32 signext undef)
-  %i426 = load i80, ptr getelementptr inbounds (<{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_901, i64 0, i32 0, i32 1), align 2
+  %i426 = load i80, ptr getelementptr inbounds (<{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, ptr @g_901, i64 0, i32 0, i32 1), align 2
   %i427 = lshr i80 %i426, 11
   %i428 = trunc i80 %i427 to i64
   %i429 = and i64 %i428, 1
   call fastcc void @transparent_crc(i64 %i429, ptr @.str.196, i32 signext undef)
-  %i430 = load volatile i80, ptr getelementptr inbounds (<{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_901, i64 0, i32 0, i32 1), align 2
+  %i430 = load volatile i80, ptr getelementptr inbounds (<{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, ptr @g_901, i64 0, i32 0, i32 1), align 2
   %i431 = shl i80 %i430, 69
   %i432 = ashr i80 %i431, 72
   %i433 = shl nsw i80 %i432, 32
   %i434 = trunc i80 %i433 to i64
   %i435 = ashr exact i64 %i434, 32
   call fastcc void @transparent_crc(i64 %i435, ptr @.str.197, i32 signext undef)
-  %i436 = load volatile i80, ptr getelementptr inbounds (<{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_901, i64 0, i32 1, i32 0, i32 0), align 4
+  %i436 = load volatile i80, ptr getelementptr inbounds (<{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, ptr @g_901, i64 0, i32 1, i32 0, i32 0), align 4
   %i437 = lshr i80 %i436, 57
   %i438 = trunc i80 %i437 to i64
   call fastcc void @transparent_crc(i64 %i438, ptr @.str.189, i32 signext undef)
-  %i439 = load volatile i80, ptr getelementptr inbounds (<{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_901, i64 0, i32 1, i32 0, i32 0), align 4
+  %i439 = load volatile i80, ptr getelementptr inbounds (<{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, ptr @g_901, i64 0, i32 1, i32 0, i32 0), align 4
   %i440 = shl i80 %i439, 23
   %i441 = ashr i80 %i440, 64
   %i442 = shl nsw i80 %i441, 32
   %i443 = trunc i80 %i442 to i64
   %i444 = ashr exact i64 %i443, 32
   call fastcc void @transparent_crc(i64 %i444, ptr @.str.190, i32 signext undef)
-  %i445 = load volatile i80, ptr getelementptr inbounds (<{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_901, i64 0, i32 1, i32 0, i32 0), align 4
+  %i445 = load volatile i80, ptr getelementptr inbounds (<{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, ptr @g_901, i64 0, i32 1, i32 0, i32 0), align 4
   %i446 = shl i80 %i445, 39
   %i447 = ashr i80 %i446, 62
   %i448 = shl nsw i80 %i447, 32
   %i449 = trunc i80 %i448 to i64
   %i450 = ashr exact i64 %i449, 32
   call fastcc void @transparent_crc(i64 %i450, ptr @.str.191, i32 signext undef)
-  %i451 = load volatile i80, ptr getelementptr inbounds (<{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_901, i64 0, i32 1, i32 0, i32 0), align 4
+  %i451 = load volatile i80, ptr getelementptr inbounds (<{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, ptr @g_901, i64 0, i32 1, i32 0, i32 0), align 4
   %i452 = shl i80 %i451, 57
   %i453 = ashr i80 %i452, 58
   %i454 = shl nsw i80 %i453, 32
   %i455 = trunc i80 %i454 to i64
   %i456 = ashr exact i64 %i455, 32
   call fastcc void @transparent_crc(i64 %i456, ptr @.str.192, i32 signext undef)
-  %i457 = load i80, ptr getelementptr inbounds (<{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_901, i64 0, i32 1, i32 1), align 2
+  %i457 = load i80, ptr getelementptr inbounds (<{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, ptr @g_901, i64 0, i32 1, i32 1), align 2
   %i458 = lshr i80 %i457, 49
   %i459 = trunc i80 %i458 to i64
   call fastcc void @transparent_crc(i64 %i459, ptr @.str.193, i32 signext undef)
-  %i460 = load volatile i80, ptr getelementptr inbounds (<{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_901, i64 0, i32 1, i32 1), align 2
+  %i460 = load volatile i80, ptr getelementptr inbounds (<{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, ptr @g_901, i64 0, i32 1, i32 1), align 2
   %i461 = lshr i80 %i460, 24
   %i462 = trunc i80 %i461 to i64
   %i463 = and i64 %i462, 33554431
   call fastcc void @transparent_crc(i64 %i463, ptr @.str.194, i32 signext undef)
-  %i464 = load i80, ptr getelementptr inbounds (<{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_901, i64 0, i32 1, i32 1), align 2
+  %i464 = load i80, ptr getelementptr inbounds (<{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, ptr @g_901, i64 0, i32 1, i32 1), align 2
   %i465 = shl i80 %i464, 56
   %i466 = ashr i80 %i465, 68
   %i467 = shl nsw i80 %i466, 32
@@ -4134,7 +4134,7 @@ bb25:                                             ; preds = %bb15
   %i495 = trunc i80 %i494 to i64
   %i496 = ashr exact i64 %i495, 32
   call fastcc void @transparent_crc(i64 %i496, ptr @.str.233, i32 signext undef)
-  %i497 = getelementptr inbounds [9 x [2 x [1 x %4]]], ptr bitcast (<{ <{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, <{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, <{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, <{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, <{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, <{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, <{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, <{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, <{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }> }>* @g_905 to ptr), i64 0, i64 0, i64 1, i64 0
+  %i497 = getelementptr inbounds [9 x [2 x [1 x %4]]], ptr @g_905, i64 0, i64 0, i64 1, i64 0
   %i499 = load volatile i80, ptr %i497, align 2
   %i500 = lshr i80 %i499, 57
   %i501 = trunc i80 %i500 to i64
@@ -4243,7 +4243,7 @@ bb25:                                             ; preds = %bb15
   %i583 = trunc i80 %i582 to i64
   %i584 = ashr exact i64 %i583, 32
   call fastcc void @transparent_crc(i64 %i584, ptr @.str.261, i32 signext undef)
-  %i585 = getelementptr inbounds [3 x [10 x [4 x %4]]], ptr bitcast (<{ <{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, 
i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, <{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, 
i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, <{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, 
i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }> }>* @g_908 to ptr), i64 0, i64 1, i64 0, i64 0
+  %i585 = getelementptr inbounds [3 x [10 x [4 x %4]]], ptr @g_908, i64 0, i64 1, i64 0, i64 0
   %i587 = load volatile i80, ptr %i585, align 2
   %i588 = lshr i80 %i587, 57
   %i589 = trunc i80 %i588 to i64
@@ -4334,7 +4334,7 @@ bb25:                                             ; preds = %bb15
   call fastcc void @transparent_crc(i64 %i658, ptr @.str.263, i32 signext undef)
   %i659 = load volatile i80, ptr undef, align 2
   call fastcc void @transparent_crc(i64 undef, ptr @.str.265, i32 signext undef)
-  %i660 = getelementptr inbounds [6 x [2 x [7 x %4]]], ptr bitcast (<{ <{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, <{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, <{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { 
{ i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, <{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, <{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, <{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, 
i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }> }>* @g_909 to ptr), i64 0, i64 0, i64 0, i64 0, i32 1
+  %i660 = getelementptr inbounds [6 x [2 x [7 x %4]]], ptr @g_909, i64 0, i64 0, i64 0, i64 0, i32 1
   %i662 = load i80, ptr %i660, align 2
   %i663 = lshr i80 %i662, 49
   %i664 = trunc i80 %i663 to i64
@@ -4552,7 +4552,7 @@ bb25:                                             ; preds = %bb15
   %i838 = trunc i80 %i837 to i64
   %i839 = ashr exact i64 %i838, 32
   call fastcc void @transparent_crc(i64 %i839, ptr @.str.337, i32 signext undef)
-  %i840 = getelementptr inbounds [6 x %4], ptr bitcast (<{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_917 to ptr), i64 0, i64 0, i32 1
+  %i840 = getelementptr inbounds [6 x %4], ptr @g_917, i64 0, i64 0, i32 1
   %i842 = load i80, ptr %i840, align 2
   %i843 = lshr i80 %i842, 49
   %i844 = trunc i80 %i843 to i64
@@ -4615,73 +4615,73 @@ bb25:                                             ; preds = %bb15
   %i890 = trunc i80 %i889 to i64
   %i891 = ashr exact i64 %i890, 32
   call fastcc void @transparent_crc(i64 %i891, ptr @.str.351, i32 signext undef)
-  %i892 = load volatile i80, ptr bitcast (<{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>* @g_919 to ptr), align 8
+  %i892 = load volatile i80, ptr @g_919, align 8
   %i893 = lshr i80 %i892, 57
   %i894 = trunc i80 %i893 to i64
   call fastcc void @transparent_crc(i64 %i894, ptr @.str.352, i32 signext undef)
-  %i895 = load volatile i80, ptr bitcast (<{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>* @g_919 to ptr), align 8
+  %i895 = load volatile i80, ptr @g_919, align 8
   %i896 = shl i80 %i895, 23
   %i897 = ashr i80 %i896, 64
   %i898 = shl nsw i80 %i897, 32
   %i899 = trunc i80 %i898 to i64
   %i900 = ashr exact i64 %i899, 32
   call fastcc void @transparent_crc(i64 %i900, ptr @.str.353, i32 signext undef)
-  %i901 = load volatile i80, ptr bitcast (<{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>* @g_919 to ptr), align 8
+  %i901 = load volatile i80, ptr @g_919, align 8
   %i902 = shl i80 %i901, 39
   %i903 = ashr i80 %i902, 62
   %i904 = shl nsw i80 %i903, 32
   %i905 = trunc i80 %i904 to i64
   %i906 = ashr exact i64 %i905, 32
   call fastcc void @transparent_crc(i64 %i906, ptr @.str.354, i32 signext undef)
-  %i907 = load volatile i80, ptr bitcast (<{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>* @g_919 to ptr), align 8
+  %i907 = load volatile i80, ptr @g_919, align 8
   %i908 = shl i80 %i907, 57
   %i909 = ashr i80 %i908, 58
   %i910 = shl nsw i80 %i909, 32
   %i911 = trunc i80 %i910 to i64
   %i912 = ashr exact i64 %i911, 32
   call fastcc void @transparent_crc(i64 %i912, ptr @.str.355, i32 signext undef)
-  %i913 = load i80, ptr getelementptr inbounds (<{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, <{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>* @g_919, i64 0, i32 0, i32 0, i32 1), align 2
+  %i913 = load i80, ptr getelementptr inbounds (<{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, ptr @g_919, i64 0, i32 0, i32 0, i32 1), align 2
   %i914 = lshr i80 %i913, 49
   %i915 = trunc i80 %i914 to i64
   call fastcc void @transparent_crc(i64 %i915, ptr @.str.356, i32 signext undef)
-  %i916 = load volatile i80, ptr getelementptr inbounds (<{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, <{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>* @g_919, i64 0, i32 0, i32 0, i32 1), align 2
+  %i916 = load volatile i80, ptr getelementptr inbounds (<{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, ptr @g_919, i64 0, i32 0, i32 0, i32 1), align 2
   %i917 = lshr i80 %i916, 24
   %i918 = trunc i80 %i917 to i64
   %i919 = and i64 %i918, 33554431
   call fastcc void @transparent_crc(i64 %i919, ptr @.str.357, i32 signext undef)
-  %i920 = load i80, ptr getelementptr inbounds (<{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, <{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>* @g_919, i64 0, i32 0, i32 0, i32 1), align 2
+  %i920 = load i80, ptr getelementptr inbounds (<{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, ptr @g_919, i64 0, i32 0, i32 0, i32 1), align 2
   %i921 = shl i80 %i920, 56
   %i922 = ashr i80 %i921, 68
   %i923 = shl nsw i80 %i922, 32
   %i924 = trunc i80 %i923 to i64
   %i925 = ashr exact i64 %i924, 32
   call fastcc void @transparent_crc(i64 %i925, ptr @.str.358, i32 signext undef)
-  %i926 = load i80, ptr getelementptr inbounds (<{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, <{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>* @g_919, i64 0, i32 0, i32 0, i32 1), align 2
+  %i926 = load i80, ptr getelementptr inbounds (<{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, ptr @g_919, i64 0, i32 0, i32 0, i32 1), align 2
   %i927 = lshr i80 %i926, 11
   %i928 = trunc i80 %i927 to i64
   %i929 = and i64 %i928, 1
   call fastcc void @transparent_crc(i64 %i929, ptr @.str.359, i32 signext undef)
-  %i930 = load volatile i80, ptr getelementptr inbounds (<{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, <{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>* @g_919, i64 0, i32 0, i32 0, i32 1), align 2
+  %i930 = load volatile i80, ptr getelementptr inbounds (<{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, ptr @g_919, i64 0, i32 0, i32 0, i32 1), align 2
   %i931 = shl i80 %i930, 69
   %i932 = ashr i80 %i931, 72
   %i933 = shl nsw i80 %i932, 32
   %i934 = trunc i80 %i933 to i64
   %i935 = ashr exact i64 %i934, 32
   call fastcc void @transparent_crc(i64 %i935, ptr @.str.360, i32 signext undef)
-  %i936 = load volatile i80, ptr getelementptr inbounds (<{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, <{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>* @g_919, i64 0, i32 0, i32 1, i32 0, i32 0), align 4
+  %i936 = load volatile i80, ptr getelementptr inbounds (<{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, ptr @g_919, i64 0, i32 0, i32 1, i32 0, i32 0), align 4
   %i937 = lshr i80 %i936, 57
   %i938 = trunc i80 %i937 to i64
   call fastcc void @transparent_crc(i64 %i938, ptr @.str.352, i32 signext undef)
-  %i939 = load volatile i80, ptr getelementptr inbounds (<{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, <{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>* @g_919, i64 0, i32 0, i32 1, i32 0, i32 0), align 4
+  %i939 = load volatile i80, ptr getelementptr inbounds (<{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, ptr @g_919, i64 0, i32 0, i32 1, i32 0, i32 0), align 4
   %i940 = shl i80 %i939, 23
   %i941 = ashr i80 %i940, 64
   %i942 = shl nsw i80 %i941, 32
   %i943 = trunc i80 %i942 to i64
   %i944 = ashr exact i64 %i943, 32
   call fastcc void @transparent_crc(i64 %i944, ptr @.str.353, i32 signext undef)
-  %i945 = load volatile i80, ptr getelementptr inbounds (<{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, <{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>* @g_919, i64 0, i32 0, i32 1, i32 0, i32 0), align 4
+  %i945 = load volatile i80, ptr getelementptr inbounds (<{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, ptr @g_919, i64 0, i32 0, i32 1, i32 0, i32 0), align 4
   call fastcc void @transparent_crc(i64 0, ptr @.str.354, i32 signext undef)
-  %i946 = load volatile i80, ptr getelementptr inbounds (<{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, <{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>* @g_919, i64 0, i32 0, i32 1, i32 0, i32 0), align 4
+  %i946 = load volatile i80, ptr getelementptr inbounds (<{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, ptr @g_919, i64 0, i32 0, i32 1, i32 0, i32 0), align 4
   %i947 = shl i80 %i946, 57
   %i948 = ashr i80 %i947, 58
   %i949 = shl nsw i80 %i948, 32
@@ -4836,7 +4836,7 @@ bb25:                                             ; preds = %bb15
   %i1070 = trunc i80 %i1069 to i64
   %i1071 = ashr exact i64 %i1070, 32
   call fastcc void @transparent_crc(i64 %i1071, ptr @.str.441, i32 signext undef)
-  %i1072 = getelementptr inbounds [10 x %4], ptr bitcast (<{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_929 to ptr), i64 0, i64 0
+  %i1072 = getelementptr inbounds [10 x %4], ptr @g_929, i64 0, i64 0
   %i1074 = load volatile i80, ptr %i1072, align 2
   %i1075 = lshr i80 %i1074, 57
   %i1076 = trunc i80 %i1075 to i64
@@ -4981,7 +4981,7 @@ bb25:                                             ; preds = %bb15
   %i1189 = trunc i80 %i1188 to i64
   %i1190 = ashr exact i64 %i1189, 32
   call fastcc void @transparent_crc(i64 %i1190, ptr @.str.495, i32 signext undef)
-  %i1191 = getelementptr inbounds [10 x [6 x [4 x %4]]], ptr bitcast (<{ <{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, <{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, 
i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, <{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, <{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, 
i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, <{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, 
{ { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, <{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, <{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, 
i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, <{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, <{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, 
i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, <{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, 
i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }> }>* @g_935 to ptr), i64 0, i64 0, i64 0, i64 0
+  %i1191 = getelementptr inbounds [10 x [6 x [4 x %4]]], ptr @g_935, i64 0, i64 0, i64 0, i64 0
   %i1193 = load volatile i80, ptr %i1191, align 2
   %i1194 = lshr i80 %i1193, 57
   %i1195 = trunc i80 %i1194 to i64
@@ -5032,7 +5032,7 @@ bb25:                                             ; preds = %bb15
   %i1232 = trunc i80 %i1231 to i64
   %i1233 = ashr exact i64 %i1232, 32
   call fastcc void @transparent_crc(i64 %i1233, ptr @.str.504, i32 signext undef)
-  %i1234 = getelementptr inbounds [4 x %4], ptr bitcast (<{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_936 to ptr), i64 0, i64 0
+  %i1234 = getelementptr inbounds [4 x %4], ptr @g_936, i64 0, i64 0
   %i1236 = load volatile i80, ptr %i1234, align 2
   %i1237 = lshr i80 %i1236, 57
   %i1238 = trunc i80 %i1237 to i64
@@ -5066,7 +5066,7 @@ bb25:                                             ; preds = %bb15
   %i1260 = trunc i80 %i1259 to i64
   %i1261 = ashr exact i64 %i1260, 32
   call fastcc void @transparent_crc(i64 %i1261, ptr @.str.549, i32 signext undef)
-  %i1262 = load volatile i80, ptr bitcast (<{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_941 to ptr), align 8
+  %i1262 = load volatile i80, ptr @g_941, align 8
   %i1263 = lshr i80 %i1262, 57
   %i1264 = trunc i80 %i1263 to i64
   call fastcc void @transparent_crc(i64 %i1264, ptr @.str.550, i32 signext undef)
@@ -5208,7 +5208,7 @@ bb25:                                             ; preds = %bb15
   %i1376 = trunc i80 %i1375 to i64
   %i1377 = ashr exact i64 %i1376, 32
   call fastcc void @transparent_crc(i64 %i1377, ptr @.str.634, i32 signext undef)
-  %i1378 = getelementptr inbounds [10 x %4], ptr bitcast (<{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_950 to ptr), i64 0, i64 0, i32 1
+  %i1378 = getelementptr inbounds [10 x %4], ptr @g_950, i64 0, i64 0, i32 1
   %i1380 = load i80, ptr %i1378, align 2
   %i1381 = lshr i80 %i1380, 49
   %i1382 = trunc i80 %i1381 to i64
@@ -5537,107 +5537,107 @@ bb25:                                             ; preds = %bb15
   call fastcc void @transparent_crc(i64 %i1649, ptr @.str.695, i32 signext undef)
   %i1650 = load volatile i80, ptr @g_957, align 8
   call fastcc void @transparent_crc(i64 0, ptr @.str.736, i32 signext undef)
-  %i1651 = load i80, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_967, i64 0, i32 4, i32 0), align 2
+  %i1651 = load i80, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, ptr @g_967, i64 0, i32 4, i32 0), align 2
   %i1652 = shl i80 %i1651, 57
   %i1653 = ashr i80 %i1652, 58
   %i1654 = shl nsw i80 %i1653, 32
   %i1655 = trunc i80 %i1654 to i64
   %i1656 = ashr exact i64 %i1655, 32
   call fastcc void @transparent_crc(i64 %i1656, ptr @.str.737, i32 signext undef)
-  %i1657 = load volatile i80, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_967, i64 0, i32 5, i32 0), align 2
+  %i1657 = load volatile i80, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, ptr @g_967, i64 0, i32 5, i32 0), align 2
   %i1658 = ashr i80 %i1657, 73
   %i1659 = shl nsw i80 %i1658, 32
   %i1660 = trunc i80 %i1659 to i64
   %i1661 = ashr exact i64 %i1660, 32
   call fastcc void @transparent_crc(i64 %i1661, ptr @.str.738, i32 signext undef)
-  %i1662 = load volatile i80, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_967, i64 0, i32 5, i32 0), align 2
+  %i1662 = load volatile i80, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, ptr @g_967, i64 0, i32 5, i32 0), align 2
   %i1663 = lshr i80 %i1662, 61
   %i1664 = trunc i80 %i1663 to i64
   %i1665 = and i64 %i1664, 4095
   call fastcc void @transparent_crc(i64 %i1665, ptr @.str.739, i32 signext undef)
-  %i1666 = load volatile i80, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_967, i64 0, i32 5, i32 0), align 2
+  %i1666 = load volatile i80, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, ptr @g_967, i64 0, i32 5, i32 0), align 2
   %i1667 = shl i80 %i1666, 19
   %i1668 = ashr i80 %i1667, 59
   %i1669 = shl nsw i80 %i1668, 32
   %i1670 = trunc i80 %i1669 to i64
   %i1671 = ashr exact i64 %i1670, 32
   call fastcc void @transparent_crc(i64 %i1671, ptr @.str.740, i32 signext undef)
-  %i1672 = load i80, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_967, i64 0, i32 5, i32 0), align 2
+  %i1672 = load i80, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, ptr @g_967, i64 0, i32 5, i32 0), align 2
   %i1673 = shl i80 %i1672, 40
   %i1674 = ashr i80 %i1673, 62
   %i1675 = shl nsw i80 %i1674, 32
   %i1676 = trunc i80 %i1675 to i64
   %i1677 = ashr exact i64 %i1676, 32
   call fastcc void @transparent_crc(i64 %i1677, ptr @.str.741, i32 signext undef)
-  %i1678 = load i80, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_967, i64 0, i32 5, i32 0), align 2
+  %i1678 = load i80, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, ptr @g_967, i64 0, i32 5, i32 0), align 2
   %i1679 = lshr i80 %i1678, 4
   %i1680 = trunc i80 %i1679 to i64
   %i1681 = and i64 %i1680, 262143
   call fastcc void @transparent_crc(i64 %i1681, ptr @.str.742, i32 signext undef)
-  %i1682 = load volatile i80, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_967, i64 0, i32 6, i32 0), align 2
+  %i1682 = load volatile i80, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, ptr @g_967, i64 0, i32 6, i32 0), align 2
   %i1683 = ashr i80 %i1682, 73
   %i1684 = shl nsw i80 %i1683, 32
   %i1685 = trunc i80 %i1684 to i64
   %i1686 = ashr exact i64 %i1685, 32
   call fastcc void @transparent_crc(i64 %i1686, ptr @.str.743, i32 signext undef)
-  %i1687 = load volatile i80, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_967, i64 0, i32 6, i32 0), align 2
+  %i1687 = load volatile i80, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, ptr @g_967, i64 0, i32 6, i32 0), align 2
   %i1688 = lshr i80 %i1687, 61
   %i1689 = trunc i80 %i1688 to i64
   %i1690 = and i64 %i1689, 4095
   call fastcc void @transparent_crc(i64 %i1690, ptr @.str.744, i32 signext undef)
-  %i1691 = load volatile i80, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_967, i64 0, i32 6, i32 0), align 2
+  %i1691 = load volatile i80, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, ptr @g_967, i64 0, i32 6, i32 0), align 2
   %i1692 = shl i80 %i1691, 19
   %i1693 = ashr i80 %i1692, 59
   %i1694 = shl nsw i80 %i1693, 32
   %i1695 = trunc i80 %i1694 to i64
   %i1696 = ashr exact i64 %i1695, 32
   call fastcc void @transparent_crc(i64 %i1696, ptr @.str.745, i32 signext undef)
-  %i1697 = load i80, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_967, i64 0, i32 6, i32 0), align 2
+  %i1697 = load i80, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, ptr @g_967, i64 0, i32 6, i32 0), align 2
   %i1698 = shl i80 %i1697, 40
   %i1699 = ashr i80 %i1698, 62
   %i1700 = shl nsw i80 %i1699, 32
   %i1701 = trunc i80 %i1700 to i64
   %i1702 = ashr exact i64 %i1701, 32
   call fastcc void @transparent_crc(i64 %i1702, ptr @.str.746, i32 signext undef)
-  %i1703 = load i80, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_967, i64 0, i32 6, i32 0), align 2
+  %i1703 = load i80, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, ptr @g_967, i64 0, i32 6, i32 0), align 2
   %i1704 = lshr i80 %i1703, 4
   %i1705 = trunc i80 %i1704 to i64
   %i1706 = and i64 %i1705, 262143
   call fastcc void @transparent_crc(i64 %i1706, ptr @.str.747, i32 signext undef)
-  %i1707 = load i120, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_967, i64 0, i32 7, i32 0), align 2
+  %i1707 = load i120, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, ptr @g_967, i64 0, i32 7, i32 0), align 2
   %i1708 = lshr i120 %i1707, 107
   %i1709 = trunc i120 %i1708 to i64
   call fastcc void @transparent_crc(i64 %i1709, ptr @.str.748, i32 signext undef)
-  %i1710 = load i120, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_967, i64 0, i32 7, i32 0), align 2
+  %i1710 = load i120, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, ptr @g_967, i64 0, i32 7, i32 0), align 2
   %i1711 = lshr i120 %i1710, 78
   %i1712 = trunc i120 %i1711 to i64
   %i1713 = and i64 %i1712, 536870911
   call fastcc void @transparent_crc(i64 %i1713, ptr @.str.749, i32 signext undef)
-  %i1714 = load i120, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_967, i64 0, i32 7, i32 0), align 2
+  %i1714 = load i120, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, ptr @g_967, i64 0, i32 7, i32 0), align 2
   %i1715 = shl i120 %i1714, 42
   %i1716 = ashr i120 %i1715, 104
   %i1717 = shl nsw i120 %i1716, 32
   %i1718 = trunc i120 %i1717 to i64
   %i1719 = ashr exact i64 %i1718, 32
   call fastcc void @transparent_crc(i64 %i1719, ptr @.str.750, i32 signext undef)
-  %i1720 = load volatile i120, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_967, i64 0, i32 7, i32 0), align 2
+  %i1720 = load volatile i120, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, ptr @g_967, i64 0, i32 7, i32 0), align 2
   %i1721 = shl i120 %i1720, 58
   %i1722 = ashr i120 %i1721, 105
   %i1723 = shl nsw i120 %i1722, 32
   %i1724 = trunc i120 %i1723 to i64
   %i1725 = ashr exact i64 %i1724, 32
   call fastcc void @transparent_crc(i64 %i1725, ptr @.str.751, i32 signext undef)
-  %i1726 = load volatile i120, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_967, i64 0, i32 7, i32 0), align 2
+  %i1726 = load volatile i120, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, ptr @g_967, i64 0, i32 7, i32 0), align 2
   %i1727 = lshr i120 %i1726, 41
   %i1728 = trunc i120 %i1727 to i64
   %i1729 = and i64 %i1728, 63
   call fastcc void @transparent_crc(i64 %i1729, ptr @.str.752, i32 signext undef)
-  %i1730 = load i120, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_967, i64 0, i32 7, i32 0), align 2
+  %i1730 = load i120, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, ptr @g_967, i64 0, i32 7, i32 0), align 2
   %i1731 = lshr i120 %i1730, 19
   %i1732 = trunc i120 %i1731 to i64
   %i1733 = and i64 %i1732, 4194303
   call fastcc void @transparent_crc(i64 %i1733, ptr @.str.753, i32 signext undef)
-  %i1734 = load i120, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_967, i64 0, i32 7, i32 0), align 2
+  %i1734 = load i120, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, ptr @g_967, i64 0, i32 7, i32 0), align 2
   %i1735 = shl i120 %i1734, 101
   %i1736 = ashr exact i120 %i1735, 69
   %i1737 = trunc i120 %i1736 to i64
@@ -5649,7 +5649,7 @@ bb25:                                             ; preds = %bb15
   %i1741 = load i8, ptr undef, align 4, !tbaa !6
   %i1742 = sext i8 %i1741 to i64
   call fastcc void @transparent_crc(i64 %i1742, ptr @.str.756, i32 signext undef)
-  %i1743 = getelementptr inbounds [3 x %3], ptr bitcast (<{ { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } } }>* @g_991 to ptr), i64 0, i64 0, i32 2
+  %i1743 = getelementptr inbounds [3 x %3], ptr @g_991, i64 0, i64 0, i32 2
   %i1744 = load volatile i16, ptr %i1743, align 2, !tbaa !31
   call fastcc void @transparent_crc(i64 undef, ptr @.str.757, i32 signext undef)
   %i1745 = load i32, ptr undef, align 4, !tbaa !32
@@ -5680,7 +5680,7 @@ bb25:                                             ; preds = %bb15
   %i1766 = trunc i80 %i1765 to i64
   %i1767 = ashr exact i64 %i1766, 32
   call fastcc void @transparent_crc(i64 %i1767, ptr @.str.762, i32 signext undef)
-  %i1768 = getelementptr inbounds [3 x %3], ptr bitcast (<{ { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } } }>* @g_991 to ptr), i64 0, i64 0, i32 4, i32 1
+  %i1768 = getelementptr inbounds [3 x %3], ptr @g_991, i64 0, i64 0, i32 4, i32 1
   %i1770 = load i80, ptr %i1768, align 2
   %i1771 = lshr i80 %i1770, 49
   %i1772 = trunc i80 %i1771 to i64
@@ -5709,7 +5709,7 @@ bb25:                                             ; preds = %bb15
   %i1791 = trunc i80 %i1790 to i64
   %i1792 = ashr exact i64 %i1791, 32
   call fastcc void @transparent_crc(i64 %i1792, ptr @.str.767, i32 signext undef)
-  %i1793 = load i32, ptr getelementptr inbounds ({ i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, ptr @g_992, i64 0, i32 0), align 4, !tbaa !33
+  %i1793 = load i32, ptr @g_992, align 4, !tbaa !33
   %i1794 = zext i32 %i1793 to i64
   call fastcc void @transparent_crc(i64 %i1794, ptr @.str.768, i32 signext undef)
   %i1795 = load i8, ptr getelementptr inbounds ({ i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, ptr @g_992, i64 0, i32 1), align 4, !tbaa !6
@@ -5776,7 +5776,7 @@ bb25:                                             ; preds = %bb15
   %i1844 = load i32, ptr undef, align 4, !tbaa !33
   %i1845 = zext i32 %i1844 to i64
   call fastcc void @transparent_crc(i64 %i1845, ptr @.str.781, i32 signext undef)
-  %i1846 = getelementptr inbounds [5 x %3], ptr bitcast (<{ { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, { i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } } }>* @g_993 to ptr), i64 0, i64 0, i32 1
+  %i1846 = getelementptr inbounds [5 x %3], ptr @g_993, i64 0, i64 0, i32 1
   %i1847 = load i8, ptr %i1846, align 4, !tbaa !6
   %i1848 = sext i8 %i1847 to i64
   call fastcc void @transparent_crc(i64 %i1848, ptr @.str.782, i32 signext undef)
@@ -5836,7 +5836,7 @@ bb25:                                             ; preds = %bb15
   %i1892 = trunc i80 %i1891 to i64
   %i1893 = ashr exact i64 %i1892, 32
   call fastcc void @transparent_crc(i64 %i1893, ptr @.str.793, i32 signext undef)
-  %i1894 = load i32, ptr getelementptr inbounds ({ i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, ptr @g_994, i64 0, i32 0), align 4, !tbaa !33
+  %i1894 = load i32, ptr @g_994, align 4, !tbaa !33
   %i1895 = zext i32 %i1894 to i64
   call fastcc void @transparent_crc(i64 %i1895, ptr @.str.794, i32 signext undef)
   %i1896 = load i8, ptr getelementptr inbounds ({ i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, ptr @g_994, i64 0, i32 1), align 4, !tbaa !6
@@ -5884,7 +5884,7 @@ bb25:                                             ; preds = %bb15
   call fastcc void @transparent_crc(i64 %i1927, ptr @.str.805, i32 signext undef)
   %i1928 = load volatile i80, ptr getelementptr inbounds ({ i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, ptr @g_994, i64 0, i32 4, i32 1), align 2
   call fastcc void @transparent_crc(i64 0, ptr @.str.806, i32 signext undef)
-  %i1929 = load i32, ptr getelementptr inbounds ({ i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, ptr @g_995, i64 0, i32 0), align 4, !tbaa !33
+  %i1929 = load i32, ptr @g_995, align 4, !tbaa !33
   %i1930 = zext i32 %i1929 to i64
   call fastcc void @transparent_crc(i64 %i1930, ptr @.str.807, i32 signext undef)
   %i1931 = load i8, ptr getelementptr inbounds ({ i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, ptr @g_995, i64 0, i32 1), align 4, !tbaa !6
@@ -6010,61 +6010,61 @@ bb25:                                             ; preds = %bb15
   %i2024 = trunc i80 %i2023 to i64
   %i2025 = ashr exact i64 %i2024, 32
   call fastcc void @transparent_crc(i64 %i2025, ptr @.str.892, i32 signext undef)
-  %i2026 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1383, i64 0, i32 2, i32 0), align 1
+  %i2026 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1383, i64 0, i32 2, i32 0), align 1
   %i2027 = lshr i120 %i2026, 41
   %i2028 = trunc i120 %i2027 to i64
   %i2029 = and i64 %i2028, 63
   call fastcc void @transparent_crc(i64 %i2029, ptr @.str.908, i32 signext undef)
-  %i2030 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1383, i64 0, i32 2, i32 0), align 1
+  %i2030 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1383, i64 0, i32 2, i32 0), align 1
   %i2031 = lshr i120 %i2030, 19
   %i2032 = trunc i120 %i2031 to i64
   %i2033 = and i64 %i2032, 4194303
   call fastcc void @transparent_crc(i64 %i2033, ptr @.str.909, i32 signext undef)
-  %i2034 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1383, i64 0, i32 2, i32 0), align 1
+  %i2034 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1383, i64 0, i32 2, i32 0), align 1
   %i2035 = shl i120 %i2034, 101
   %i2036 = ashr exact i120 %i2035, 69
   %i2037 = trunc i120 %i2036 to i64
   %i2038 = ashr exact i64 %i2037, 32
   call fastcc void @transparent_crc(i64 %i2038, ptr @.str.910, i32 signext undef)
-  %i2039 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1383, i64 0, i32 3, i32 0), align 2, !tbaa !44
+  %i2039 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1383, i64 0, i32 3, i32 0), align 2, !tbaa !44
   %i2040 = zext i8 %i2039 to i64
   call fastcc void @transparent_crc(i64 %i2040, ptr @.str.911, i32 signext undef)
-  %i2041 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1383, i64 0, i32 3, i32 1), align 1, !tbaa !45
+  %i2041 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1383, i64 0, i32 3, i32 1), align 1, !tbaa !45
   %i2042 = sext i8 %i2041 to i64
   call fastcc void @transparent_crc(i64 %i2042, ptr @.str.912, i32 signext undef)
-  %i2043 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1383, i64 0, i32 3, i32 2), align 2, !tbaa !46
+  %i2043 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1383, i64 0, i32 3, i32 2), align 2, !tbaa !46
   %i2044 = sext i16 %i2043 to i64
   call fastcc void @transparent_crc(i64 %i2044, ptr @.str.913, i32 signext undef)
-  %i2045 = load i64, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1383, i64 0, i32 3, i32 3), align 2, !tbaa !47
+  %i2045 = load i64, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1383, i64 0, i32 3, i32 3), align 2, !tbaa !47
   call fastcc void @transparent_crc(i64 %i2045, ptr @.str.914, i32 signext undef)
-  %i2046 = load i32, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1383, i64 0, i32 3, i32 4), align 2, !tbaa !48
+  %i2046 = load i32, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1383, i64 0, i32 3, i32 4), align 2, !tbaa !48
   %i2047 = sext i32 %i2046 to i64
   call fastcc void @transparent_crc(i64 %i2047, ptr @.str.915, i32 signext undef)
-  %i2048 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1383, i64 0, i32 4, i32 0), align 2
+  %i2048 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1383, i64 0, i32 4, i32 0), align 2
   %i2049 = ashr i128 %i2048, 99
   %i2050 = shl nsw i128 %i2049, 32
   %i2051 = trunc i128 %i2050 to i64
   %i2052 = ashr exact i64 %i2051, 32
   call fastcc void @transparent_crc(i64 %i2052, ptr @.str.916, i32 signext undef)
-  %i2053 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1383, i64 0, i32 4, i32 0), align 2
+  %i2053 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1383, i64 0, i32 4, i32 0), align 2
   call fastcc void @transparent_crc(i64 0, ptr @.str.920, i32 signext undef)
   call fastcc void @transparent_crc(i64 undef, ptr @.str.928, i32 signext undef)
-  %i2054 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1383, i64 0, i32 5, i32 1), align 2
+  %i2054 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1383, i64 0, i32 5, i32 1), align 2
   %i2055 = lshr i80 %i2054, 11
   %i2056 = trunc i80 %i2055 to i64
   %i2057 = and i64 %i2056, 1
   call fastcc void @transparent_crc(i64 %i2057, ptr @.str.929, i32 signext undef)
-  %i2058 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1383, i64 0, i32 5, i32 1), align 2
+  %i2058 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1383, i64 0, i32 5, i32 1), align 2
   %i2059 = shl i80 %i2058, 69
   %i2060 = ashr i80 %i2059, 72
   %i2061 = shl nsw i80 %i2060, 32
   %i2062 = trunc i80 %i2061 to i64
   %i2063 = ashr exact i64 %i2062, 32
   call fastcc void @transparent_crc(i64 %i2063, ptr @.str.930, i32 signext undef)
-  %i2064 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1383, i64 0, i32 6), align 2, !tbaa !49
+  %i2064 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1383, i64 0, i32 6), align 2, !tbaa !49
   %i2065 = sext i16 %i2064 to i64
   call fastcc void @transparent_crc(i64 %i2065, ptr @.str.931, i32 signext undef)
-  %i2066 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1383, i64 0, i32 7), align 2, !tbaa !50
+  %i2066 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1383, i64 0, i32 7), align 2, !tbaa !50
   %i2067 = zext i16 %i2066 to i64
   call fastcc void @transparent_crc(i64 %i2067, ptr @.str.932, i32 signext undef)
   call fastcc void @transparent_crc(i64 -940454702, ptr @.str.933, i32 signext undef)
@@ -6073,121 +6073,121 @@ bb25:                                             ; preds = %bb15
   call fastcc void @transparent_crc(i64 464, ptr @.str.936, i32 signext undef)
   call fastcc void @transparent_crc(i64 2588, ptr @.str.937, i32 signext undef)
   call fastcc void @transparent_crc(i64 1188, ptr @.str.938, i32 signext undef)
-  %i2068 = load volatile i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1402, i64 0, i32 0), align 2, !tbaa !23
+  %i2068 = load volatile i16, ptr @g_1402, align 2, !tbaa !23
   call fastcc void @transparent_crc(i64 undef, ptr @.str.939, i32 signext undef)
-  %i2069 = load volatile i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1402, i64 0, i32 1), align 2, !tbaa !51
+  %i2069 = load volatile i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1402, i64 0, i32 1), align 2, !tbaa !51
   call fastcc void @transparent_crc(i64 undef, ptr @.str.940, i32 signext undef)
-  %i2070 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1402, i64 0, i32 2, i32 0), align 1
+  %i2070 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1402, i64 0, i32 2, i32 0), align 1
   %i2071 = lshr i120 %i2070, 107
   %i2072 = trunc i120 %i2071 to i64
   call fastcc void @transparent_crc(i64 %i2072, ptr @.str.941, i32 signext undef)
-  %i2073 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1402, i64 0, i32 2, i32 0), align 1
+  %i2073 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1402, i64 0, i32 2, i32 0), align 1
   %i2074 = lshr i120 %i2073, 78
   %i2075 = trunc i120 %i2074 to i64
   %i2076 = and i64 %i2075, 536870911
   call fastcc void @transparent_crc(i64 %i2076, ptr @.str.942, i32 signext undef)
-  %i2077 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1402, i64 0, i32 2, i32 0), align 1
+  %i2077 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1402, i64 0, i32 2, i32 0), align 1
   %i2078 = shl i120 %i2077, 42
   %i2079 = ashr i120 %i2078, 104
   %i2080 = shl nsw i120 %i2079, 32
   %i2081 = trunc i120 %i2080 to i64
   %i2082 = ashr exact i64 %i2081, 32
   call fastcc void @transparent_crc(i64 %i2082, ptr @.str.943, i32 signext undef)
-  %i2083 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1402, i64 0, i32 2, i32 0), align 1
+  %i2083 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1402, i64 0, i32 2, i32 0), align 1
   %i2084 = shl i120 %i2083, 58
   %i2085 = ashr i120 %i2084, 105
   %i2086 = shl nsw i120 %i2085, 32
   %i2087 = trunc i120 %i2086 to i64
   %i2088 = ashr exact i64 %i2087, 32
   call fastcc void @transparent_crc(i64 %i2088, ptr @.str.944, i32 signext undef)
-  %i2089 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1402, i64 0, i32 2, i32 0), align 1
+  %i2089 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1402, i64 0, i32 2, i32 0), align 1
   %i2090 = lshr i120 %i2089, 41
   %i2091 = trunc i120 %i2090 to i64
   %i2092 = and i64 %i2091, 63
   call fastcc void @transparent_crc(i64 %i2092, ptr @.str.945, i32 signext undef)
-  %i2093 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1402, i64 0, i32 2, i32 0), align 1
+  %i2093 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1402, i64 0, i32 2, i32 0), align 1
   %i2094 = lshr i120 %i2093, 19
   %i2095 = trunc i120 %i2094 to i64
   %i2096 = and i64 %i2095, 4194303
   call fastcc void @transparent_crc(i64 %i2096, ptr @.str.946, i32 signext undef)
-  %i2097 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1402, i64 0, i32 2, i32 0), align 1
+  %i2097 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1402, i64 0, i32 2, i32 0), align 1
   %i2098 = shl i120 %i2097, 101
   %i2099 = ashr exact i120 %i2098, 69
   %i2100 = trunc i120 %i2099 to i64
   %i2101 = ashr exact i64 %i2100, 32
   call fastcc void @transparent_crc(i64 %i2101, ptr @.str.947, i32 signext undef)
-  %i2102 = load volatile i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1402, i64 0, i32 3, i32 0), align 2, !tbaa !44
+  %i2102 = load volatile i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1402, i64 0, i32 3, i32 0), align 2, !tbaa !44
   call fastcc void @transparent_crc(i64 undef, ptr @.str.956, i32 signext undef)
-  %i2103 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1402, i64 0, i32 4, i32 0), align 2
+  %i2103 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1402, i64 0, i32 4, i32 0), align 2
   %i2104 = lshr i128 %i2103, 28
   %i2105 = trunc i128 %i2104 to i64
   %i2106 = and i64 %i2105, 3
   call fastcc void @transparent_crc(i64 %i2106, ptr @.str.957, i32 signext undef)
-  %i2107 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1402, i64 0, i32 4, i32 0), align 2
+  %i2107 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1402, i64 0, i32 4, i32 0), align 2
   %i2108 = shl i128 %i2107, 100
   %i2109 = ashr i128 %i2108, 107
   %i2110 = shl nsw i128 %i2109, 32
   %i2111 = trunc i128 %i2110 to i64
   %i2112 = ashr exact i64 %i2111, 32
   call fastcc void @transparent_crc(i64 %i2112, ptr @.str.958, i32 signext undef)
-  %i2113 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1402, i64 0, i32 5, i32 0, i32 0), align 2
+  %i2113 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1402, i64 0, i32 5, i32 0, i32 0), align 2
   %i2114 = lshr i80 %i2113, 57
   %i2115 = trunc i80 %i2114 to i64
   call fastcc void @transparent_crc(i64 %i2115, ptr @.str.959, i32 signext undef)
-  %i2116 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1402, i64 0, i32 5, i32 0, i32 0), align 2
+  %i2116 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1402, i64 0, i32 5, i32 0, i32 0), align 2
   %i2117 = shl i80 %i2116, 23
   %i2118 = ashr i80 %i2117, 64
   %i2119 = shl nsw i80 %i2118, 32
   %i2120 = trunc i80 %i2119 to i64
   %i2121 = ashr exact i64 %i2120, 32
   call fastcc void @transparent_crc(i64 %i2121, ptr @.str.960, i32 signext undef)
-  %i2122 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1402, i64 0, i32 5, i32 0, i32 0), align 2
+  %i2122 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1402, i64 0, i32 5, i32 0, i32 0), align 2
   %i2123 = shl i80 %i2122, 39
   %i2124 = ashr i80 %i2123, 62
   %i2125 = shl nsw i80 %i2124, 32
   %i2126 = trunc i80 %i2125 to i64
   %i2127 = ashr exact i64 %i2126, 32
   call fastcc void @transparent_crc(i64 %i2127, ptr @.str.961, i32 signext undef)
-  %i2128 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1402, i64 0, i32 5, i32 0, i32 0), align 2
+  %i2128 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1402, i64 0, i32 5, i32 0, i32 0), align 2
   %i2129 = shl i80 %i2128, 57
   %i2130 = ashr i80 %i2129, 58
   %i2131 = shl nsw i80 %i2130, 32
   %i2132 = trunc i80 %i2131 to i64
   %i2133 = ashr exact i64 %i2132, 32
   call fastcc void @transparent_crc(i64 %i2133, ptr @.str.962, i32 signext undef)
-  %i2134 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1402, i64 0, i32 5, i32 1), align 2
+  %i2134 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1402, i64 0, i32 5, i32 1), align 2
   %i2135 = lshr i80 %i2134, 49
   %i2136 = trunc i80 %i2135 to i64
   call fastcc void @transparent_crc(i64 %i2136, ptr @.str.963, i32 signext undef)
-  %i2137 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1402, i64 0, i32 5, i32 1), align 2
+  %i2137 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1402, i64 0, i32 5, i32 1), align 2
   %i2138 = lshr i80 %i2137, 24
   %i2139 = trunc i80 %i2138 to i64
   %i2140 = and i64 %i2139, 33554431
   call fastcc void @transparent_crc(i64 %i2140, ptr @.str.964, i32 signext undef)
-  %i2141 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1402, i64 0, i32 5, i32 1), align 2
+  %i2141 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1402, i64 0, i32 5, i32 1), align 2
   %i2142 = shl i80 %i2141, 56
   %i2143 = ashr i80 %i2142, 68
   %i2144 = shl nsw i80 %i2143, 32
   %i2145 = trunc i80 %i2144 to i64
   %i2146 = ashr exact i64 %i2145, 32
   call fastcc void @transparent_crc(i64 %i2146, ptr @.str.965, i32 signext undef)
-  %i2147 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1402, i64 0, i32 5, i32 1), align 2
+  %i2147 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1402, i64 0, i32 5, i32 1), align 2
   %i2148 = lshr i80 %i2147, 11
   %i2149 = trunc i80 %i2148 to i64
   %i2150 = and i64 %i2149, 1
   call fastcc void @transparent_crc(i64 %i2150, ptr @.str.966, i32 signext undef)
-  %i2151 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1402, i64 0, i32 5, i32 1), align 2
+  %i2151 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1402, i64 0, i32 5, i32 1), align 2
   %i2152 = shl i80 %i2151, 69
   %i2153 = ashr i80 %i2152, 72
   %i2154 = shl nsw i80 %i2153, 32
   %i2155 = trunc i80 %i2154 to i64
   %i2156 = ashr exact i64 %i2155, 32
   call fastcc void @transparent_crc(i64 %i2156, ptr @.str.967, i32 signext undef)
-  %i2157 = load volatile i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1402, i64 0, i32 6), align 2, !tbaa !49
+  %i2157 = load volatile i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1402, i64 0, i32 6), align 2, !tbaa !49
   call fastcc void @transparent_crc(i64 undef, ptr @.str.968, i32 signext undef)
-  %i2158 = load volatile i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1402, i64 0, i32 7), align 2, !tbaa !50
+  %i2158 = load volatile i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1402, i64 0, i32 7), align 2, !tbaa !50
   call fastcc void @transparent_crc(i64 undef, ptr @.str.969, i32 signext undef)
-  %i2159 = load i32, ptr getelementptr inbounds ({ i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, ptr @g_1438, i64 0, i32 0), align 4, !tbaa !33
+  %i2159 = load i32, ptr @g_1438, align 4, !tbaa !33
   %i2160 = zext i32 %i2159 to i64
   call fastcc void @transparent_crc(i64 %i2160, ptr @.str.970, i32 signext undef)
   %i2161 = load i8, ptr getelementptr inbounds ({ i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, ptr @g_1438, i64 0, i32 1), align 4, !tbaa !6
@@ -6246,64 +6246,64 @@ bb25:                                             ; preds = %bb15
   call fastcc void @transparent_crc(i64 %i2203, ptr @.str.981, i32 signext undef)
   %i2204 = load volatile i80, ptr getelementptr inbounds ({ i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, ptr @g_1438, i64 0, i32 4, i32 1), align 2
   call fastcc void @transparent_crc(i64 undef, ptr @.str.988, i32 signext undef)
-  %i2205 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1456, i64 0, i32 2, i32 0), align 1
+  %i2205 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1456, i64 0, i32 2, i32 0), align 1
   %i2206 = lshr i120 %i2205, 41
   %i2207 = trunc i120 %i2206 to i64
   %i2208 = and i64 %i2207, 63
   call fastcc void @transparent_crc(i64 %i2208, ptr @.str.989, i32 signext undef)
-  %i2209 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1456, i64 0, i32 2, i32 0), align 1
+  %i2209 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1456, i64 0, i32 2, i32 0), align 1
   %i2210 = lshr i120 %i2209, 19
   %i2211 = trunc i120 %i2210 to i64
   %i2212 = and i64 %i2211, 4194303
   call fastcc void @transparent_crc(i64 %i2212, ptr @.str.990, i32 signext undef)
-  %i2213 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1456, i64 0, i32 2, i32 0), align 1
+  %i2213 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1456, i64 0, i32 2, i32 0), align 1
   %i2214 = shl i120 %i2213, 101
   %i2215 = ashr exact i120 %i2214, 69
   %i2216 = trunc i120 %i2215 to i64
   %i2217 = ashr exact i64 %i2216, 32
   call fastcc void @transparent_crc(i64 %i2217, ptr @.str.991, i32 signext undef)
-  %i2218 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1456, i64 0, i32 3, i32 0), align 2, !tbaa !44
+  %i2218 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1456, i64 0, i32 3, i32 0), align 2, !tbaa !44
   %i2219 = zext i8 %i2218 to i64
   call fastcc void @transparent_crc(i64 %i2219, ptr @.str.992, i32 signext undef)
-  %i2220 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1456, i64 0, i32 3, i32 1), align 1, !tbaa !45
+  %i2220 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1456, i64 0, i32 3, i32 1), align 1, !tbaa !45
   %i2221 = sext i8 %i2220 to i64
   call fastcc void @transparent_crc(i64 %i2221, ptr @.str.993, i32 signext undef)
-  %i2222 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1456, i64 0, i32 3, i32 2), align 2, !tbaa !46
+  %i2222 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1456, i64 0, i32 3, i32 2), align 2, !tbaa !46
   %i2223 = sext i16 %i2222 to i64
   call fastcc void @transparent_crc(i64 %i2223, ptr @.str.994, i32 signext undef)
   call fastcc void @transparent_crc(i64 0, ptr @.str.1006, i32 signext undef)
-  %i2224 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1456, i64 0, i32 5, i32 1), align 2
+  %i2224 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1456, i64 0, i32 5, i32 1), align 2
   %i2225 = lshr i80 %i2224, 49
   %i2226 = trunc i80 %i2225 to i64
   call fastcc void @transparent_crc(i64 %i2226, ptr @.str.1007, i32 signext undef)
-  %i2227 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1456, i64 0, i32 5, i32 1), align 2
+  %i2227 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1456, i64 0, i32 5, i32 1), align 2
   %i2228 = lshr i80 %i2227, 24
   %i2229 = trunc i80 %i2228 to i64
   %i2230 = and i64 %i2229, 33554431
   call fastcc void @transparent_crc(i64 %i2230, ptr @.str.1008, i32 signext undef)
-  %i2231 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1456, i64 0, i32 5, i32 1), align 2
+  %i2231 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1456, i64 0, i32 5, i32 1), align 2
   %i2232 = shl i80 %i2231, 56
   %i2233 = ashr i80 %i2232, 68
   %i2234 = shl nsw i80 %i2233, 32
   %i2235 = trunc i80 %i2234 to i64
   %i2236 = ashr exact i64 %i2235, 32
   call fastcc void @transparent_crc(i64 %i2236, ptr @.str.1009, i32 signext undef)
-  %i2237 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1456, i64 0, i32 5, i32 1), align 2
+  %i2237 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1456, i64 0, i32 5, i32 1), align 2
   %i2238 = lshr i80 %i2237, 11
   %i2239 = trunc i80 %i2238 to i64
   %i2240 = and i64 %i2239, 1
   call fastcc void @transparent_crc(i64 %i2240, ptr @.str.1010, i32 signext undef)
-  %i2241 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1456, i64 0, i32 5, i32 1), align 2
+  %i2241 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1456, i64 0, i32 5, i32 1), align 2
   %i2242 = shl i80 %i2241, 69
   %i2243 = ashr i80 %i2242, 72
   %i2244 = shl nsw i80 %i2243, 32
   %i2245 = trunc i80 %i2244 to i64
   %i2246 = ashr exact i64 %i2245, 32
   call fastcc void @transparent_crc(i64 %i2246, ptr @.str.1011, i32 signext undef)
-  %i2247 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1456, i64 0, i32 6), align 2, !tbaa !49
+  %i2247 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1456, i64 0, i32 6), align 2, !tbaa !49
   %i2248 = sext i16 %i2247 to i64
   call fastcc void @transparent_crc(i64 %i2248, ptr @.str.1012, i32 signext undef)
-  %i2249 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1456, i64 0, i32 7), align 2, !tbaa !50
+  %i2249 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1456, i64 0, i32 7), align 2, !tbaa !50
   %i2250 = zext i16 %i2249 to i64
   call fastcc void @transparent_crc(i64 %i2250, ptr @.str.1013, i32 signext undef)
   %i2251 = load volatile i80, ptr undef, align 2
@@ -6331,7 +6331,7 @@ bb25:                                             ; preds = %bb15
   %i2270 = trunc i80 %i2269 to i64
   %i2271 = ashr exact i64 %i2270, 32
   call fastcc void @transparent_crc(i64 %i2271, ptr @.str.1017, i32 signext undef)
-  %i2272 = getelementptr inbounds [4 x [7 x %4]], ptr bitcast (<{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>* @g_1482 to ptr), i64 0, i64 0, i64 0, i32 1
+  %i2272 = getelementptr inbounds [4 x [7 x %4]], ptr @g_1482, i64 0, i64 0, i64 0, i32 1
   %i2274 = load i80, ptr %i2272, align 2
   %i2275 = lshr i80 %i2274, 49
   %i2276 = trunc i80 %i2275 to i64
@@ -6432,7 +6432,7 @@ bb25:                                             ; preds = %bb15
   %i2354 = trunc i80 %i2353 to i64
   %i2355 = ashr exact i64 %i2354, 32
   call fastcc void @transparent_crc(i64 %i2355, ptr @.str.1041, i32 signext undef)
-  %i2356 = load volatile i32, ptr getelementptr inbounds ({ i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i32, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, ptr @g_1669, i64 0, i32 0), align 8, !tbaa !52
+  %i2356 = load volatile i32, ptr @g_1669, align 8, !tbaa !52
   call fastcc void @transparent_crc(i64 undef, ptr @.str.1042, i32 signext undef)
   %i2357 = load i80, ptr getelementptr inbounds ({ i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i32, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, ptr @g_1669, i64 0, i32 1, i32 0), align 4
   %i2358 = lshr i80 %i2357, 57
@@ -6559,7 +6559,7 @@ bb25:                                             ; preds = %bb15
   %i2454 = trunc i80 %i2453 to i64
   %i2455 = and i64 %i2454, 262143
   call fastcc void @transparent_crc(i64 %i2455, ptr @.str.1068, i32 signext undef)
-  %i2456 = load volatile i32, ptr getelementptr inbounds ({ i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i32, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, ptr @g_1671, i64 0, i32 0), align 8, !tbaa !52
+  %i2456 = load volatile i32, ptr @g_1671, align 8, !tbaa !52
   call fastcc void @transparent_crc(i64 undef, ptr @.str.1069, i32 signext undef)
   %i2457 = load i80, ptr getelementptr inbounds ({ i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i32, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, ptr @g_1671, i64 0, i32 1, i32 0), align 4
   %i2458 = lshr i80 %i2457, 57
@@ -6604,14 +6604,14 @@ bb25:                                             ; preds = %bb15
   call fastcc void @transparent_crc(i64 %i2489, ptr @.str.1079, i32 signext undef)
   %i2490 = load volatile i80, ptr getelementptr inbounds ({ i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i32, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, ptr @g_1671, i64 0, i32 3, i32 1), align 2
   call fastcc void @transparent_crc(i64 undef, ptr @.str.1135, i32 signext undef)
-  %i2491 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1783, i64 0, i32 5, i32 0, i32 0), align 2
+  %i2491 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1783, i64 0, i32 5, i32 0, i32 0), align 2
   %i2492 = shl i80 %i2491, 39
   %i2493 = ashr i80 %i2492, 62
   %i2494 = shl nsw i80 %i2493, 32
   %i2495 = trunc i80 %i2494 to i64
   %i2496 = ashr exact i64 %i2495, 32
   call fastcc void @transparent_crc(i64 %i2496, ptr @.str.1136, i32 signext undef)
-  %i2497 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1783, i64 0, i32 5, i32 0, i32 0), align 2
+  %i2497 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1783, i64 0, i32 5, i32 0, i32 0), align 2
   %i2498 = shl i80 %i2497, 57
   %i2499 = ashr i80 %i2498, 58
   %i2500 = shl nsw i80 %i2499, 32
@@ -6619,14 +6619,14 @@ bb25:                                             ; preds = %bb15
   %i2502 = ashr exact i64 %i2501, 32
   call fastcc void @transparent_crc(i64 %i2502, ptr @.str.1137, i32 signext undef)
   call fastcc void @transparent_crc(i64 4294, ptr @.str.1138, i32 signext undef)
-  %i2503 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1783, i64 0, i32 5, i32 1), align 2
+  %i2503 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1783, i64 0, i32 5, i32 1), align 2
   %i2504 = lshr i80 %i2503, 24
   %i2505 = trunc i80 %i2504 to i64
   %i2506 = and i64 %i2505, 33554431
   call fastcc void @transparent_crc(i64 %i2506, ptr @.str.1139, i32 signext undef)
   call fastcc void @transparent_crc(i64 -17, ptr @.str.1140, i32 signext undef)
   call fastcc void @transparent_crc(i64 0, ptr @.str.1141, i32 signext undef)
-  %i2507 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_1783, i64 0, i32 5, i32 1), align 2
+  %i2507 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_1783, i64 0, i32 5, i32 1), align 2
   %i2508 = shl i80 %i2507, 69
   %i2509 = ashr i80 %i2508, 72
   %i2510 = shl nsw i80 %i2509, 32
@@ -6694,7 +6694,7 @@ bb25:                                             ; preds = %bb15
   %i2556 = load i32, ptr undef, align 2, !tbaa !48
   %i2557 = sext i32 %i2556 to i64
   call fastcc void @transparent_crc(i64 %i2557, ptr @.str.1158, i32 signext undef)
-  %i2558 = getelementptr inbounds [10 x [6 x %5]], ptr bitcast (<{ <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 
}, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, 
i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, 
i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, 
i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }> }>* @g_1786 to ptr), i64 0, i64 0, i64 0, i32 4, i32 0
+  %i2558 = getelementptr inbounds [10 x [6 x %5]], ptr @g_1786, i64 0, i64 0, i64 0, i32 4, i32 0
   %i2559 = load volatile i128, ptr %i2558, align 2
   call fastcc void @transparent_crc(i64 undef, ptr @.str.1164, i32 signext undef)
   %i2560 = load volatile i80, ptr undef, align 2
@@ -6797,7 +6797,7 @@ bb25:                                             ; preds = %bb15
   %i2638 = load i8, ptr undef, align 2, !tbaa !44
   %i2639 = zext i8 %i2638 to i64
   call fastcc void @transparent_crc(i64 %i2639, ptr @.str.1194, i32 signext undef)
-  %i2640 = getelementptr inbounds [4 x [7 x %5]], ptr bitcast (<{ <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, 
i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }> }>* @g_1889 to ptr), i64 0, i64 0, i64 0, i32 
3, i32 1
+  %i2640 = getelementptr inbounds [4 x [7 x %5]], ptr @g_1889, i64 0, i64 0, i64 0, i32 3, i32 1
   %i2641 = load i8, ptr %i2640, align 1, !tbaa !45
   %i2642 = sext i8 %i2641 to i64
   call fastcc void @transparent_crc(i64 %i2642, ptr @.str.1195, i32 signext undef)
@@ -6820,71 +6820,71 @@ bb25:                                             ; preds = %bb15
   call fastcc void @transparent_crc(i64 %i2655, ptr @.str.1205, i32 signext undef)
   %i2656 = load volatile i80, ptr undef, align 2
   call fastcc void @transparent_crc(i64 0, ptr @.str.1299, i32 signext undef)
-  %i2657 = load volatile i80, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_2172, i64 0, i32 5, i32 0), align 2
+  %i2657 = load volatile i80, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, ptr @g_2172, i64 0, i32 5, i32 0), align 2
   call fastcc void @transparent_crc(i64 0, ptr @.str.1301, i32 signext undef)
-  %i2658 = load i80, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_2172, i64 0, i32 5, i32 0), align 2
+  %i2658 = load i80, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, ptr @g_2172, i64 0, i32 5, i32 0), align 2
   %i2659 = lshr i80 %i2658, 4
   %i2660 = trunc i80 %i2659 to i64
   %i2661 = and i64 %i2660, 262143
   call fastcc void @transparent_crc(i64 %i2661, ptr @.str.1302, i32 signext undef)
-  %i2662 = load volatile i80, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_2172, i64 0, i32 6, i32 0), align 2
+  %i2662 = load volatile i80, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, ptr @g_2172, i64 0, i32 6, i32 0), align 2
   %i2663 = ashr i80 %i2662, 73
   %i2664 = shl nsw i80 %i2663, 32
   %i2665 = trunc i80 %i2664 to i64
   %i2666 = ashr exact i64 %i2665, 32
   call fastcc void @transparent_crc(i64 %i2666, ptr @.str.1303, i32 signext undef)
-  %i2667 = load volatile i80, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_2172, i64 0, i32 6, i32 0), align 2
+  %i2667 = load volatile i80, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, ptr @g_2172, i64 0, i32 6, i32 0), align 2
   %i2668 = lshr i80 %i2667, 61
   %i2669 = trunc i80 %i2668 to i64
   %i2670 = and i64 %i2669, 4095
   call fastcc void @transparent_crc(i64 %i2670, ptr @.str.1304, i32 signext undef)
-  %i2671 = load volatile i80, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_2172, i64 0, i32 6, i32 0), align 2
+  %i2671 = load volatile i80, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, ptr @g_2172, i64 0, i32 6, i32 0), align 2
   %i2672 = shl i80 %i2671, 19
   %i2673 = ashr i80 %i2672, 59
   %i2674 = shl nsw i80 %i2673, 32
   %i2675 = trunc i80 %i2674 to i64
   %i2676 = ashr exact i64 %i2675, 32
   call fastcc void @transparent_crc(i64 %i2676, ptr @.str.1305, i32 signext undef)
-  %i2677 = load i80, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_2172, i64 0, i32 6, i32 0), align 2
+  %i2677 = load i80, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, ptr @g_2172, i64 0, i32 6, i32 0), align 2
   %i2678 = shl i80 %i2677, 40
   %i2679 = ashr i80 %i2678, 62
   %i2680 = shl nsw i80 %i2679, 32
   %i2681 = trunc i80 %i2680 to i64
   %i2682 = ashr exact i64 %i2681, 32
   call fastcc void @transparent_crc(i64 %i2682, ptr @.str.1306, i32 signext undef)
-  %i2683 = load i80, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_2172, i64 0, i32 6, i32 0), align 2
+  %i2683 = load i80, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, ptr @g_2172, i64 0, i32 6, i32 0), align 2
   %i2684 = lshr i80 %i2683, 4
   %i2685 = trunc i80 %i2684 to i64
   %i2686 = and i64 %i2685, 262143
   call fastcc void @transparent_crc(i64 %i2686, ptr @.str.1307, i32 signext undef)
-  %i2687 = load i120, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_2172, i64 0, i32 7, i32 0), align 2
+  %i2687 = load i120, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, ptr @g_2172, i64 0, i32 7, i32 0), align 2
   %i2688 = lshr i120 %i2687, 107
   %i2689 = trunc i120 %i2688 to i64
   call fastcc void @transparent_crc(i64 %i2689, ptr @.str.1308, i32 signext undef)
-  %i2690 = load i120, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_2172, i64 0, i32 7, i32 0), align 2
+  %i2690 = load i120, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, ptr @g_2172, i64 0, i32 7, i32 0), align 2
   %i2691 = lshr i120 %i2690, 78
   %i2692 = trunc i120 %i2691 to i64
   %i2693 = and i64 %i2692, 536870911
   call fastcc void @transparent_crc(i64 %i2693, ptr @.str.1309, i32 signext undef)
   call fastcc void @transparent_crc(i64 0, ptr @.str.1310, i32 signext undef)
-  %i2694 = load volatile i120, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_2172, i64 0, i32 7, i32 0), align 2
+  %i2694 = load volatile i120, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, ptr @g_2172, i64 0, i32 7, i32 0), align 2
   %i2695 = shl i120 %i2694, 58
   %i2696 = ashr i120 %i2695, 105
   %i2697 = shl nsw i120 %i2696, 32
   %i2698 = trunc i120 %i2697 to i64
   %i2699 = ashr exact i64 %i2698, 32
   call fastcc void @transparent_crc(i64 %i2699, ptr @.str.1311, i32 signext undef)
-  %i2700 = load volatile i120, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_2172, i64 0, i32 7, i32 0), align 2
+  %i2700 = load volatile i120, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, ptr @g_2172, i64 0, i32 7, i32 0), align 2
   %i2701 = lshr i120 %i2700, 41
   %i2702 = trunc i120 %i2701 to i64
   %i2703 = and i64 %i2702, 63
   call fastcc void @transparent_crc(i64 %i2703, ptr @.str.1312, i32 signext undef)
-  %i2704 = load i120, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_2172, i64 0, i32 7, i32 0), align 2
+  %i2704 = load i120, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, ptr @g_2172, i64 0, i32 7, i32 0), align 2
   %i2705 = lshr i120 %i2704, 19
   %i2706 = trunc i120 %i2705 to i64
   %i2707 = and i64 %i2706, 4194303
   call fastcc void @transparent_crc(i64 %i2707, ptr @.str.1313, i32 signext undef)
-  %i2708 = load i120, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_2172, i64 0, i32 7, i32 0), align 2
+  %i2708 = load i120, ptr getelementptr inbounds (<{ i16, i32, i32, i32, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, ptr @g_2172, i64 0, i32 7, i32 0), align 2
   %i2709 = shl i120 %i2708, 101
   %i2710 = ashr exact i120 %i2709, 69
   %i2711 = trunc i120 %i2710 to i64
@@ -6907,23 +6907,23 @@ bb25:                                             ; preds = %bb15
   %i2725 = ashr exact i64 %i2724, 32
   call fastcc void @transparent_crc(i64 %i2725, ptr @.str.1317, i32 signext undef)
   %i2726 = load volatile i120, ptr @g_2178, align 8
-  %i2727 = load i80, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, <{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>* @g_2237, i64 0, i32 4, i32 0), align 1
+  %i2727 = load i80, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, ptr @g_2237, i64 0, i32 4, i32 0), align 1
   %i2728 = shl i80 %i2727, 39
   %i2729 = ashr i80 %i2728, 62
   %i2730 = shl nsw i80 %i2729, 32
   %i2731 = trunc i80 %i2730 to i64
   %i2732 = ashr exact i64 %i2731, 32
   call fastcc void @transparent_crc(i64 %i2732, ptr @.str.1330, i32 signext undef)
-  %i2733 = load i80, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, <{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>* @g_2237, i64 0, i32 4, i32 0), align 1
+  %i2733 = load i80, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, ptr @g_2237, i64 0, i32 4, i32 0), align 1
   %i2734 = shl i80 %i2733, 57
   %i2735 = ashr i80 %i2734, 58
   %i2736 = shl nsw i80 %i2735, 32
   %i2737 = trunc i80 %i2736 to i64
   %i2738 = ashr exact i64 %i2737, 32
   call fastcc void @transparent_crc(i64 %i2738, ptr @.str.1331, i32 signext undef)
-  %i2739 = load volatile i64, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, <{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>* @g_2237, i64 0, i32 5), align 1, !tbaa !53
+  %i2739 = load volatile i64, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, ptr @g_2237, i64 0, i32 5), align 1, !tbaa !53
   call fastcc void @transparent_crc(i64 %i2739, ptr @.str.1332, i32 signext undef)
-  %i2740 = load volatile i64, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, <{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>* @g_2237, i64 0, i32 6), align 1, !tbaa !55
+  %i2740 = load volatile i64, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, ptr @g_2237, i64 0, i32 6), align 1, !tbaa !55
   call fastcc void @transparent_crc(i64 %i2740, ptr @.str.1333, i32 signext undef)
   %i2741 = load i120, ptr @g_2260, align 8
   %i2742 = lshr i120 %i2741, 107
@@ -6964,22 +6964,22 @@ bb25:                                             ; preds = %bb15
   %i2771 = trunc i120 %i2770 to i64
   %i2772 = ashr exact i64 %i2771, 32
   call fastcc void @transparent_crc(i64 %i2772, ptr @.str.1340, i32 signext undef)
-  %i2773 = load i120, ptr bitcast (<{ { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_2261 to ptr), align 8
+  %i2773 = load i120, ptr @g_2261, align 8
   %i2774 = lshr i120 %i2773, 107
   %i2775 = trunc i120 %i2774 to i64
   call fastcc void @transparent_crc(i64 %i2775, ptr @.str.1341, i32 signext undef)
   call fastcc void @transparent_crc(i64 0, ptr @.str.1344, i32 signext undef)
-  %i2776 = load volatile i120, ptr bitcast (<{ { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_2261 to ptr), align 8
+  %i2776 = load volatile i120, ptr @g_2261, align 8
   %i2777 = lshr i120 %i2776, 41
   %i2778 = trunc i120 %i2777 to i64
   %i2779 = and i64 %i2778, 63
   call fastcc void @transparent_crc(i64 %i2779, ptr @.str.1345, i32 signext undef)
-  %i2780 = load i120, ptr bitcast (<{ { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_2261 to ptr), align 8
+  %i2780 = load i120, ptr @g_2261, align 8
   %i2781 = lshr i120 %i2780, 19
   %i2782 = trunc i120 %i2781 to i64
   %i2783 = and i64 %i2782, 4194303
   call fastcc void @transparent_crc(i64 %i2783, ptr @.str.1346, i32 signext undef)
-  %i2784 = load i120, ptr bitcast (<{ { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>* @g_2261 to ptr), align 8
+  %i2784 = load i120, ptr @g_2261, align 8
   %i2785 = shl i120 %i2784, 101
   %i2786 = ashr exact i120 %i2785, 69
   %i2787 = trunc i120 %i2786 to i64
@@ -7330,7 +7330,7 @@ bb25:                                             ; preds = %bb15
   %i3065 = trunc i128 %i3064 to i64
   %i3066 = ashr exact i64 %i3065, 32
   call fastcc void @transparent_crc(i64 %i3066, ptr @.str.1696, i32 signext undef)
-  %i3067 = getelementptr inbounds [5 x %5], ptr bitcast (<{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>* @g_2928 to ptr), i64 0, i64 0, i32 5
+  %i3067 = getelementptr inbounds [5 x %5], ptr @g_2928, i64 0, i64 0, i32 5
   %i3069 = load volatile i80, ptr %i3067, align 2
   %i3070 = lshr i80 %i3069, 57
   %i3071 = trunc i80 %i3070 to i64
@@ -7390,260 +7390,260 @@ bb25:                                             ; preds = %bb15
   %i3115 = load i16, ptr undef, align 2, !tbaa !50
   %i3116 = zext i16 %i3115 to i64
   call fastcc void @transparent_crc(i64 %i3116, ptr @.str.1707, i32 signext undef)
-  %i3117 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2929, i64 0, i32 0), align 2, !tbaa !23
+  %i3117 = load i16, ptr @g_2929, align 2, !tbaa !23
   %i3118 = sext i16 %i3117 to i64
   call fastcc void @transparent_crc(i64 %i3118, ptr @.str.1708, i32 signext undef)
-  %i3119 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2929, i64 0, i32 1), align 2, !tbaa !51
+  %i3119 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2929, i64 0, i32 1), align 2, !tbaa !51
   %i3120 = sext i8 %i3119 to i64
   call fastcc void @transparent_crc(i64 %i3120, ptr @.str.1709, i32 signext undef)
-  %i3121 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2929, i64 0, i32 2, i32 0), align 1
+  %i3121 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2929, i64 0, i32 2, i32 0), align 1
   %i3122 = lshr i120 %i3121, 107
   %i3123 = trunc i120 %i3122 to i64
   call fastcc void @transparent_crc(i64 %i3123, ptr @.str.1710, i32 signext undef)
-  %i3124 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2929, i64 0, i32 2, i32 0), align 1
+  %i3124 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2929, i64 0, i32 2, i32 0), align 1
   %i3125 = lshr i120 %i3124, 78
   %i3126 = trunc i120 %i3125 to i64
   %i3127 = and i64 %i3126, 536870911
   call fastcc void @transparent_crc(i64 %i3127, ptr @.str.1711, i32 signext undef)
-  %i3128 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2929, i64 0, i32 2, i32 0), align 1
+  %i3128 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2929, i64 0, i32 2, i32 0), align 1
   %i3129 = shl i120 %i3128, 42
   %i3130 = ashr i120 %i3129, 104
   %i3131 = shl nsw i120 %i3130, 32
   %i3132 = trunc i120 %i3131 to i64
   %i3133 = ashr exact i64 %i3132, 32
   call fastcc void @transparent_crc(i64 %i3133, ptr @.str.1712, i32 signext undef)
-  %i3134 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2929, i64 0, i32 2, i32 0), align 1
+  %i3134 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2929, i64 0, i32 2, i32 0), align 1
   %i3135 = shl i120 %i3134, 58
   %i3136 = ashr i120 %i3135, 105
   %i3137 = shl nsw i120 %i3136, 32
   %i3138 = trunc i120 %i3137 to i64
   %i3139 = ashr exact i64 %i3138, 32
   call fastcc void @transparent_crc(i64 %i3139, ptr @.str.1713, i32 signext undef)
-  %i3140 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2929, i64 0, i32 2, i32 0), align 1
+  %i3140 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2929, i64 0, i32 2, i32 0), align 1
   %i3141 = lshr i120 %i3140, 41
   %i3142 = trunc i120 %i3141 to i64
   %i3143 = and i64 %i3142, 63
   call fastcc void @transparent_crc(i64 %i3143, ptr @.str.1714, i32 signext undef)
-  %i3144 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2929, i64 0, i32 2, i32 0), align 1
+  %i3144 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2929, i64 0, i32 2, i32 0), align 1
   %i3145 = lshr i120 %i3144, 19
   %i3146 = trunc i120 %i3145 to i64
   %i3147 = and i64 %i3146, 4194303
   call fastcc void @transparent_crc(i64 %i3147, ptr @.str.1715, i32 signext undef)
-  %i3148 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2929, i64 0, i32 2, i32 0), align 1
+  %i3148 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2929, i64 0, i32 2, i32 0), align 1
   %i3149 = shl i120 %i3148, 101
   %i3150 = ashr exact i120 %i3149, 69
   %i3151 = trunc i120 %i3150 to i64
   %i3152 = ashr exact i64 %i3151, 32
   call fastcc void @transparent_crc(i64 %i3152, ptr @.str.1716, i32 signext undef)
-  %i3153 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2929, i64 0, i32 3, i32 0), align 2, !tbaa !44
+  %i3153 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2929, i64 0, i32 3, i32 0), align 2, !tbaa !44
   %i3154 = zext i8 %i3153 to i64
   call fastcc void @transparent_crc(i64 %i3154, ptr @.str.1717, i32 signext undef)
-  %i3155 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2929, i64 0, i32 3, i32 1), align 1, !tbaa !45
+  %i3155 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2929, i64 0, i32 3, i32 1), align 1, !tbaa !45
   %i3156 = sext i8 %i3155 to i64
   call fastcc void @transparent_crc(i64 %i3156, ptr @.str.1718, i32 signext undef)
-  %i3157 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2929, i64 0, i32 3, i32 2), align 2, !tbaa !46
+  %i3157 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2929, i64 0, i32 3, i32 2), align 2, !tbaa !46
   %i3158 = sext i16 %i3157 to i64
   call fastcc void @transparent_crc(i64 %i3158, ptr @.str.1719, i32 signext undef)
-  %i3159 = load i64, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2929, i64 0, i32 3, i32 3), align 2, !tbaa !47
+  %i3159 = load i64, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2929, i64 0, i32 3, i32 3), align 2, !tbaa !47
   call fastcc void @transparent_crc(i64 %i3159, ptr @.str.1720, i32 signext undef)
-  %i3160 = load i32, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2929, i64 0, i32 3, i32 4), align 2, !tbaa !48
+  %i3160 = load i32, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2929, i64 0, i32 3, i32 4), align 2, !tbaa !48
   %i3161 = sext i32 %i3160 to i64
   call fastcc void @transparent_crc(i64 %i3161, ptr @.str.1721, i32 signext undef)
-  %i3162 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2929, i64 0, i32 4, i32 0), align 2
+  %i3162 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2929, i64 0, i32 4, i32 0), align 2
   %i3163 = ashr i128 %i3162, 99
   %i3164 = shl nsw i128 %i3163, 32
   %i3165 = trunc i128 %i3164 to i64
   %i3166 = ashr exact i64 %i3165, 32
   call fastcc void @transparent_crc(i64 %i3166, ptr @.str.1722, i32 signext undef)
-  %i3167 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2929, i64 0, i32 4, i32 0), align 2
+  %i3167 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2929, i64 0, i32 4, i32 0), align 2
   %i3168 = shl i128 %i3167, 29
   %i3169 = ashr i128 %i3168, 97
   %i3170 = shl nsw i128 %i3169, 32
   %i3171 = trunc i128 %i3170 to i64
   %i3172 = ashr exact i64 %i3171, 32
   call fastcc void @transparent_crc(i64 %i3172, ptr @.str.1723, i32 signext undef)
-  %i3173 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2929, i64 0, i32 4, i32 0), align 2
+  %i3173 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2929, i64 0, i32 4, i32 0), align 2
   %i3174 = shl i128 %i3173, 60
   %i3175 = ashr i128 %i3174, 108
   %i3176 = shl nsw i128 %i3175, 32
   %i3177 = trunc i128 %i3176 to i64
   %i3178 = ashr exact i64 %i3177, 32
   call fastcc void @transparent_crc(i64 %i3178, ptr @.str.1724, i32 signext undef)
-  %i3179 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2929, i64 0, i32 4, i32 0), align 2
+  %i3179 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2929, i64 0, i32 4, i32 0), align 2
   %i3180 = shl i128 %i3179, 80
   %i3181 = ashr i128 %i3180, 110
   %i3182 = shl nsw i128 %i3181, 32
   %i3183 = trunc i128 %i3182 to i64
   %i3184 = ashr exact i64 %i3183, 32
   call fastcc void @transparent_crc(i64 %i3184, ptr @.str.1725, i32 signext undef)
-  %i3185 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2929, i64 0, i32 4, i32 0), align 2
+  %i3185 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2929, i64 0, i32 4, i32 0), align 2
   %i3186 = lshr i128 %i3185, 28
   %i3187 = trunc i128 %i3186 to i64
   %i3188 = and i64 %i3187, 3
   call fastcc void @transparent_crc(i64 %i3188, ptr @.str.1726, i32 signext undef)
-  %i3189 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2929, i64 0, i32 4, i32 0), align 2
+  %i3189 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2929, i64 0, i32 4, i32 0), align 2
   %i3190 = shl i128 %i3189, 100
   %i3191 = ashr i128 %i3190, 107
   %i3192 = shl nsw i128 %i3191, 32
   %i3193 = trunc i128 %i3192 to i64
   %i3194 = ashr exact i64 %i3193, 32
   call fastcc void @transparent_crc(i64 %i3194, ptr @.str.1727, i32 signext undef)
-  %i3195 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2929, i64 0, i32 5, i32 0, i32 0), align 2
+  %i3195 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2929, i64 0, i32 5, i32 0, i32 0), align 2
   %i3196 = lshr i80 %i3195, 57
   %i3197 = trunc i80 %i3196 to i64
   call fastcc void @transparent_crc(i64 %i3197, ptr @.str.1728, i32 signext undef)
-  %i3198 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2929, i64 0, i32 5, i32 0, i32 0), align 2
+  %i3198 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2929, i64 0, i32 5, i32 0, i32 0), align 2
   %i3199 = shl i80 %i3198, 23
   %i3200 = ashr i80 %i3199, 64
   %i3201 = shl nsw i80 %i3200, 32
   %i3202 = trunc i80 %i3201 to i64
   %i3203 = ashr exact i64 %i3202, 32
   call fastcc void @transparent_crc(i64 %i3203, ptr @.str.1729, i32 signext undef)
-  %i3204 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2929, i64 0, i32 5, i32 0, i32 0), align 2
+  %i3204 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2929, i64 0, i32 5, i32 0, i32 0), align 2
   %i3205 = shl i80 %i3204, 39
   %i3206 = ashr i80 %i3205, 62
   %i3207 = shl nsw i80 %i3206, 32
   %i3208 = trunc i80 %i3207 to i64
   %i3209 = ashr exact i64 %i3208, 32
   call fastcc void @transparent_crc(i64 %i3209, ptr @.str.1730, i32 signext undef)
-  %i3210 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2929, i64 0, i32 5, i32 0, i32 0), align 2
+  %i3210 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2929, i64 0, i32 5, i32 0, i32 0), align 2
   %i3211 = shl i80 %i3210, 57
   %i3212 = ashr i80 %i3211, 58
   %i3213 = shl nsw i80 %i3212, 32
   %i3214 = trunc i80 %i3213 to i64
   %i3215 = ashr exact i64 %i3214, 32
   call fastcc void @transparent_crc(i64 %i3215, ptr @.str.1731, i32 signext undef)
-  %i3216 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2929, i64 0, i32 5, i32 1), align 2
+  %i3216 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2929, i64 0, i32 5, i32 1), align 2
   %i3217 = lshr i80 %i3216, 49
   %i3218 = trunc i80 %i3217 to i64
   call fastcc void @transparent_crc(i64 %i3218, ptr @.str.1732, i32 signext undef)
-  %i3219 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2929, i64 0, i32 5, i32 1), align 2
+  %i3219 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2929, i64 0, i32 5, i32 1), align 2
   %i3220 = lshr i80 %i3219, 24
   %i3221 = trunc i80 %i3220 to i64
   %i3222 = and i64 %i3221, 33554431
   call fastcc void @transparent_crc(i64 %i3222, ptr @.str.1733, i32 signext undef)
-  %i3223 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2929, i64 0, i32 5, i32 1), align 2
+  %i3223 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2929, i64 0, i32 5, i32 1), align 2
   %i3224 = shl i80 %i3223, 56
   %i3225 = ashr i80 %i3224, 68
   %i3226 = shl nsw i80 %i3225, 32
   %i3227 = trunc i80 %i3226 to i64
   %i3228 = ashr exact i64 %i3227, 32
   call fastcc void @transparent_crc(i64 %i3228, ptr @.str.1734, i32 signext undef)
-  %i3229 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2929, i64 0, i32 5, i32 1), align 2
+  %i3229 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2929, i64 0, i32 5, i32 1), align 2
   %i3230 = lshr i80 %i3229, 11
   %i3231 = trunc i80 %i3230 to i64
   %i3232 = and i64 %i3231, 1
   call fastcc void @transparent_crc(i64 %i3232, ptr @.str.1735, i32 signext undef)
-  %i3233 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2929, i64 0, i32 5, i32 1), align 2
+  %i3233 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2929, i64 0, i32 5, i32 1), align 2
   %i3234 = shl i80 %i3233, 69
   %i3235 = ashr i80 %i3234, 72
   %i3236 = shl nsw i80 %i3235, 32
   %i3237 = trunc i80 %i3236 to i64
   %i3238 = ashr exact i64 %i3237, 32
   call fastcc void @transparent_crc(i64 %i3238, ptr @.str.1736, i32 signext undef)
-  %i3239 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2929, i64 0, i32 6), align 2, !tbaa !49
+  %i3239 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2929, i64 0, i32 6), align 2, !tbaa !49
   %i3240 = sext i16 %i3239 to i64
   call fastcc void @transparent_crc(i64 %i3240, ptr @.str.1737, i32 signext undef)
-  %i3241 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2929, i64 0, i32 7), align 2, !tbaa !50
+  %i3241 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2929, i64 0, i32 7), align 2, !tbaa !50
   %i3242 = zext i16 %i3241 to i64
   call fastcc void @transparent_crc(i64 %i3242, ptr @.str.1738, i32 signext undef)
-  %i3243 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2930, i64 0, i32 0), align 2, !tbaa !23
+  %i3243 = load i16, ptr @g_2930, align 2, !tbaa !23
   %i3244 = sext i16 %i3243 to i64
   call fastcc void @transparent_crc(i64 %i3244, ptr @.str.1739, i32 signext undef)
-  %i3245 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2930, i64 0, i32 1), align 2, !tbaa !51
+  %i3245 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2930, i64 0, i32 1), align 2, !tbaa !51
   %i3246 = sext i8 %i3245 to i64
   call fastcc void @transparent_crc(i64 %i3246, ptr @.str.1740, i32 signext undef)
-  %i3247 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2930, i64 0, i32 2, i32 0), align 1
+  %i3247 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2930, i64 0, i32 2, i32 0), align 1
   %i3248 = lshr i120 %i3247, 107
   %i3249 = trunc i120 %i3248 to i64
   call fastcc void @transparent_crc(i64 %i3249, ptr @.str.1741, i32 signext undef)
-  %i3250 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2930, i64 0, i32 2, i32 0), align 1
+  %i3250 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2930, i64 0, i32 2, i32 0), align 1
   %i3251 = lshr i120 %i3250, 78
   %i3252 = trunc i120 %i3251 to i64
   %i3253 = and i64 %i3252, 536870911
   call fastcc void @transparent_crc(i64 %i3253, ptr @.str.1742, i32 signext undef)
-  %i3254 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2930, i64 0, i32 2, i32 0), align 1
+  %i3254 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2930, i64 0, i32 2, i32 0), align 1
   %i3255 = shl i120 %i3254, 42
   %i3256 = ashr i120 %i3255, 104
   %i3257 = shl nsw i120 %i3256, 32
   %i3258 = trunc i120 %i3257 to i64
   %i3259 = ashr exact i64 %i3258, 32
   call fastcc void @transparent_crc(i64 %i3259, ptr @.str.1743, i32 signext undef)
-  %i3260 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2930, i64 0, i32 2, i32 0), align 1
+  %i3260 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2930, i64 0, i32 2, i32 0), align 1
   %i3261 = shl i120 %i3260, 58
   %i3262 = ashr i120 %i3261, 105
   %i3263 = shl nsw i120 %i3262, 32
   %i3264 = trunc i120 %i3263 to i64
   %i3265 = ashr exact i64 %i3264, 32
   call fastcc void @transparent_crc(i64 %i3265, ptr @.str.1744, i32 signext undef)
-  %i3266 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2930, i64 0, i32 2, i32 0), align 1
+  %i3266 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2930, i64 0, i32 2, i32 0), align 1
   %i3267 = lshr i120 %i3266, 41
   %i3268 = trunc i120 %i3267 to i64
   %i3269 = and i64 %i3268, 63
   call fastcc void @transparent_crc(i64 %i3269, ptr @.str.1745, i32 signext undef)
-  %i3270 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2930, i64 0, i32 2, i32 0), align 1
+  %i3270 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2930, i64 0, i32 2, i32 0), align 1
   %i3271 = lshr i120 %i3270, 19
   %i3272 = trunc i120 %i3271 to i64
   %i3273 = and i64 %i3272, 4194303
   call fastcc void @transparent_crc(i64 %i3273, ptr @.str.1746, i32 signext undef)
-  %i3274 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2930, i64 0, i32 2, i32 0), align 1
+  %i3274 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2930, i64 0, i32 2, i32 0), align 1
   %i3275 = shl i120 %i3274, 101
   %i3276 = ashr exact i120 %i3275, 69
   %i3277 = trunc i120 %i3276 to i64
   %i3278 = ashr exact i64 %i3277, 32
   call fastcc void @transparent_crc(i64 %i3278, ptr @.str.1747, i32 signext undef)
-  %i3279 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2930, i64 0, i32 3, i32 0), align 2, !tbaa !44
+  %i3279 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2930, i64 0, i32 3, i32 0), align 2, !tbaa !44
   %i3280 = zext i8 %i3279 to i64
   call fastcc void @transparent_crc(i64 %i3280, ptr @.str.1748, i32 signext undef)
-  %i3281 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2930, i64 0, i32 3, i32 1), align 1, !tbaa !45
+  %i3281 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2930, i64 0, i32 3, i32 1), align 1, !tbaa !45
   %i3282 = sext i8 %i3281 to i64
   call fastcc void @transparent_crc(i64 %i3282, ptr @.str.1749, i32 signext undef)
-  %i3283 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2930, i64 0, i32 3, i32 2), align 2, !tbaa !46
+  %i3283 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2930, i64 0, i32 3, i32 2), align 2, !tbaa !46
   %i3284 = sext i16 %i3283 to i64
   call fastcc void @transparent_crc(i64 %i3284, ptr @.str.1750, i32 signext undef)
-  %i3285 = load i64, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2930, i64 0, i32 3, i32 3), align 2, !tbaa !47
+  %i3285 = load i64, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2930, i64 0, i32 3, i32 3), align 2, !tbaa !47
   call fastcc void @transparent_crc(i64 %i3285, ptr @.str.1751, i32 signext undef)
-  %i3286 = load i32, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2930, i64 0, i32 3, i32 4), align 2, !tbaa !48
+  %i3286 = load i32, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2930, i64 0, i32 3, i32 4), align 2, !tbaa !48
   %i3287 = sext i32 %i3286 to i64
   call fastcc void @transparent_crc(i64 %i3287, ptr @.str.1752, i32 signext undef)
-  %i3288 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2930, i64 0, i32 4, i32 0), align 2
+  %i3288 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2930, i64 0, i32 4, i32 0), align 2
   %i3289 = ashr i128 %i3288, 99
   %i3290 = shl nsw i128 %i3289, 32
   %i3291 = trunc i128 %i3290 to i64
   %i3292 = ashr exact i64 %i3291, 32
   call fastcc void @transparent_crc(i64 %i3292, ptr @.str.1753, i32 signext undef)
-  %i3293 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2930, i64 0, i32 4, i32 0), align 2
+  %i3293 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2930, i64 0, i32 4, i32 0), align 2
   %i3294 = shl i128 %i3293, 29
   %i3295 = ashr i128 %i3294, 97
   %i3296 = shl nsw i128 %i3295, 32
   %i3297 = trunc i128 %i3296 to i64
   %i3298 = ashr exact i64 %i3297, 32
   call fastcc void @transparent_crc(i64 %i3298, ptr @.str.1754, i32 signext undef)
-  %i3299 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2930, i64 0, i32 4, i32 0), align 2
+  %i3299 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2930, i64 0, i32 4, i32 0), align 2
   %i3300 = shl i128 %i3299, 60
   %i3301 = ashr i128 %i3300, 108
   %i3302 = shl nsw i128 %i3301, 32
   %i3303 = trunc i128 %i3302 to i64
   %i3304 = ashr exact i64 %i3303, 32
   call fastcc void @transparent_crc(i64 %i3304, ptr @.str.1755, i32 signext undef)
-  %i3305 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2930, i64 0, i32 4, i32 0), align 2
+  %i3305 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2930, i64 0, i32 4, i32 0), align 2
   %i3306 = shl i128 %i3305, 80
   %i3307 = ashr i128 %i3306, 110
   %i3308 = shl nsw i128 %i3307, 32
   %i3309 = trunc i128 %i3308 to i64
   %i3310 = ashr exact i64 %i3309, 32
   call fastcc void @transparent_crc(i64 %i3310, ptr @.str.1756, i32 signext undef)
-  %i3311 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2930, i64 0, i32 4, i32 0), align 2
+  %i3311 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2930, i64 0, i32 4, i32 0), align 2
   %i3312 = lshr i128 %i3311, 28
   %i3313 = trunc i128 %i3312 to i64
   %i3314 = and i64 %i3313, 3
   call fastcc void @transparent_crc(i64 %i3314, ptr @.str.1757, i32 signext undef)
   call fastcc void @transparent_crc(i64 0, ptr @.str.1762, i32 signext undef)
-  %i3315 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2930, i64 0, i32 5, i32 1), align 2
+  %i3315 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2930, i64 0, i32 5, i32 1), align 2
   %i3316 = lshr i80 %i3315, 49
   %i3317 = trunc i80 %i3316 to i64
   call fastcc void @transparent_crc(i64 %i3317, ptr @.str.1763, i32 signext undef)
-  %i3318 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2930, i64 0, i32 5, i32 1), align 2
+  %i3318 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2930, i64 0, i32 5, i32 1), align 2
   call fastcc void @transparent_crc(i64 0, ptr @.str.1775, i32 signext undef)
   %i3319 = load volatile i120, ptr undef, align 1
   %i3320 = lshr i120 %i3319, 41
@@ -7675,7 +7675,7 @@ bb25:                                             ; preds = %bb15
   %i3339 = load i32, ptr undef, align 2, !tbaa !48
   %i3340 = sext i32 %i3339 to i64
   call fastcc void @transparent_crc(i64 %i3340, ptr @.str.1783, i32 signext undef)
-  %i3341 = getelementptr inbounds [5 x [4 x [2 x %5]]], ptr bitcast (<{ <{ <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }> }>, <{ <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }> }>, <{ <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }> }>, <{ <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }> }>, <{ <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }> }> }>* @g_2932 to ptr), i64 0, i64 0, i64 0, i64 0, i32 4, i32 0
+  %i3341 = getelementptr inbounds [5 x [4 x [2 x %5]]], ptr @g_2932, i64 0, i64 0, i64 0, i64 0, i32 4, i32 0
   %i3342 = load volatile i128, ptr %i3341, align 2
   %i3343 = ashr i128 %i3342, 99
   %i3344 = shl nsw i128 %i3343, 32
@@ -7791,7 +7791,7 @@ bb25:                                             ; preds = %bb15
   %i3430 = load i32, ptr undef, align 2, !tbaa !48
   %i3431 = sext i32 %i3430 to i64
   call fastcc void @transparent_crc(i64 %i3431, ptr @.str.1814, i32 signext undef)
-  %i3432 = getelementptr inbounds [2 x [2 x %5]], ptr bitcast (<{ <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }> }>* @g_2933 to ptr), i64 0, i64 0, i64 0, i32 4, i32 0
+  %i3432 = getelementptr inbounds [2 x [2 x %5]], ptr @g_2933, i64 0, i64 0, i64 0, i32 4, i32 0
   %i3433 = load volatile i128, ptr %i3432, align 2
   %i3434 = ashr i128 %i3433, 99
   %i3435 = shl nsw i128 %i3434, 32
@@ -7862,161 +7862,161 @@ bb25:                                             ; preds = %bb15
   %i3488 = load i16, ptr undef, align 2, !tbaa !50
   %i3489 = zext i16 %i3488 to i64
   call fastcc void @transparent_crc(i64 %i3489, ptr @.str.1831, i32 signext undef)
-  %i3490 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2934, i64 0, i32 0), align 2, !tbaa !23
+  %i3490 = load i16, ptr @g_2934, align 2, !tbaa !23
   %i3491 = sext i16 %i3490 to i64
   call fastcc void @transparent_crc(i64 %i3491, ptr @.str.1832, i32 signext undef)
-  %i3492 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2934, i64 0, i32 1), align 2, !tbaa !51
+  %i3492 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2934, i64 0, i32 1), align 2, !tbaa !51
   %i3493 = sext i8 %i3492 to i64
   call fastcc void @transparent_crc(i64 %i3493, ptr @.str.1833, i32 signext undef)
-  %i3494 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2934, i64 0, i32 2, i32 0), align 1
+  %i3494 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2934, i64 0, i32 2, i32 0), align 1
   %i3495 = lshr i120 %i3494, 107
   %i3496 = trunc i120 %i3495 to i64
   call fastcc void @transparent_crc(i64 %i3496, ptr @.str.1834, i32 signext undef)
-  %i3497 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2934, i64 0, i32 2, i32 0), align 1
+  %i3497 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2934, i64 0, i32 2, i32 0), align 1
   %i3498 = lshr i120 %i3497, 78
   %i3499 = trunc i120 %i3498 to i64
   %i3500 = and i64 %i3499, 536870911
   call fastcc void @transparent_crc(i64 %i3500, ptr @.str.1835, i32 signext undef)
-  %i3501 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2934, i64 0, i32 2, i32 0), align 1
+  %i3501 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2934, i64 0, i32 2, i32 0), align 1
   %i3502 = shl i120 %i3501, 42
   %i3503 = ashr i120 %i3502, 104
   %i3504 = shl nsw i120 %i3503, 32
   %i3505 = trunc i120 %i3504 to i64
   %i3506 = ashr exact i64 %i3505, 32
   call fastcc void @transparent_crc(i64 %i3506, ptr @.str.1836, i32 signext undef)
-  %i3507 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2934, i64 0, i32 2, i32 0), align 1
+  %i3507 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2934, i64 0, i32 2, i32 0), align 1
   %i3508 = shl i120 %i3507, 58
   %i3509 = ashr i120 %i3508, 105
   %i3510 = shl nsw i120 %i3509, 32
   %i3511 = trunc i120 %i3510 to i64
   %i3512 = ashr exact i64 %i3511, 32
   call fastcc void @transparent_crc(i64 %i3512, ptr @.str.1837, i32 signext undef)
-  %i3513 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2934, i64 0, i32 2, i32 0), align 1
+  %i3513 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2934, i64 0, i32 2, i32 0), align 1
   %i3514 = lshr i120 %i3513, 41
   %i3515 = trunc i120 %i3514 to i64
   %i3516 = and i64 %i3515, 63
   call fastcc void @transparent_crc(i64 %i3516, ptr @.str.1838, i32 signext undef)
-  %i3517 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2934, i64 0, i32 2, i32 0), align 1
+  %i3517 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2934, i64 0, i32 2, i32 0), align 1
   %i3518 = lshr i120 %i3517, 19
   %i3519 = trunc i120 %i3518 to i64
   %i3520 = and i64 %i3519, 4194303
   call fastcc void @transparent_crc(i64 %i3520, ptr @.str.1839, i32 signext undef)
-  %i3521 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2934, i64 0, i32 2, i32 0), align 1
+  %i3521 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2934, i64 0, i32 2, i32 0), align 1
   %i3522 = shl i120 %i3521, 101
   %i3523 = ashr exact i120 %i3522, 69
   %i3524 = trunc i120 %i3523 to i64
   %i3525 = ashr exact i64 %i3524, 32
   call fastcc void @transparent_crc(i64 %i3525, ptr @.str.1840, i32 signext undef)
-  %i3526 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2934, i64 0, i32 3, i32 0), align 2, !tbaa !44
+  %i3526 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2934, i64 0, i32 3, i32 0), align 2, !tbaa !44
   %i3527 = zext i8 %i3526 to i64
   call fastcc void @transparent_crc(i64 %i3527, ptr @.str.1841, i32 signext undef)
-  %i3528 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2934, i64 0, i32 3, i32 1), align 1, !tbaa !45
+  %i3528 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2934, i64 0, i32 3, i32 1), align 1, !tbaa !45
   %i3529 = sext i8 %i3528 to i64
   call fastcc void @transparent_crc(i64 %i3529, ptr @.str.1842, i32 signext undef)
-  %i3530 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2934, i64 0, i32 3, i32 2), align 2, !tbaa !46
+  %i3530 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2934, i64 0, i32 3, i32 2), align 2, !tbaa !46
   %i3531 = sext i16 %i3530 to i64
   call fastcc void @transparent_crc(i64 %i3531, ptr @.str.1843, i32 signext undef)
-  %i3532 = load i64, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2934, i64 0, i32 3, i32 3), align 2, !tbaa !47
+  %i3532 = load i64, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2934, i64 0, i32 3, i32 3), align 2, !tbaa !47
   call fastcc void @transparent_crc(i64 %i3532, ptr @.str.1844, i32 signext undef)
-  %i3533 = load i32, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2934, i64 0, i32 3, i32 4), align 2, !tbaa !48
+  %i3533 = load i32, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2934, i64 0, i32 3, i32 4), align 2, !tbaa !48
   %i3534 = sext i32 %i3533 to i64
   call fastcc void @transparent_crc(i64 %i3534, ptr @.str.1845, i32 signext undef)
-  %i3535 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2934, i64 0, i32 4, i32 0), align 2
+  %i3535 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2934, i64 0, i32 4, i32 0), align 2
   %i3536 = ashr i128 %i3535, 99
   %i3537 = shl nsw i128 %i3536, 32
   %i3538 = trunc i128 %i3537 to i64
   %i3539 = ashr exact i64 %i3538, 32
   call fastcc void @transparent_crc(i64 %i3539, ptr @.str.1846, i32 signext undef)
-  %i3540 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2934, i64 0, i32 4, i32 0), align 2
+  %i3540 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2934, i64 0, i32 4, i32 0), align 2
   %i3541 = shl i128 %i3540, 29
   %i3542 = ashr i128 %i3541, 97
   %i3543 = shl nsw i128 %i3542, 32
   %i3544 = trunc i128 %i3543 to i64
   %i3545 = ashr exact i64 %i3544, 32
   call fastcc void @transparent_crc(i64 %i3545, ptr @.str.1847, i32 signext undef)
-  %i3546 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2934, i64 0, i32 4, i32 0), align 2
+  %i3546 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2934, i64 0, i32 4, i32 0), align 2
   %i3547 = shl i128 %i3546, 60
   %i3548 = ashr i128 %i3547, 108
   %i3549 = shl nsw i128 %i3548, 32
   %i3550 = trunc i128 %i3549 to i64
   %i3551 = ashr exact i64 %i3550, 32
   call fastcc void @transparent_crc(i64 %i3551, ptr @.str.1848, i32 signext undef)
-  %i3552 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2934, i64 0, i32 4, i32 0), align 2
+  %i3552 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2934, i64 0, i32 4, i32 0), align 2
   %i3553 = shl i128 %i3552, 80
   %i3554 = ashr i128 %i3553, 110
   %i3555 = shl nsw i128 %i3554, 32
   %i3556 = trunc i128 %i3555 to i64
   %i3557 = ashr exact i64 %i3556, 32
   call fastcc void @transparent_crc(i64 %i3557, ptr @.str.1849, i32 signext undef)
-  %i3558 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2934, i64 0, i32 4, i32 0), align 2
+  %i3558 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2934, i64 0, i32 4, i32 0), align 2
   %i3559 = lshr i128 %i3558, 28
   %i3560 = trunc i128 %i3559 to i64
   %i3561 = and i64 %i3560, 3
   call fastcc void @transparent_crc(i64 %i3561, ptr @.str.1850, i32 signext undef)
-  %i3562 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2934, i64 0, i32 4, i32 0), align 2
+  %i3562 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2934, i64 0, i32 4, i32 0), align 2
   %i3563 = shl i128 %i3562, 100
   %i3564 = ashr i128 %i3563, 107
   %i3565 = shl nsw i128 %i3564, 32
   %i3566 = trunc i128 %i3565 to i64
   %i3567 = ashr exact i64 %i3566, 32
   call fastcc void @transparent_crc(i64 %i3567, ptr @.str.1851, i32 signext undef)
-  %i3568 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2934, i64 0, i32 5, i32 0, i32 0), align 2
+  %i3568 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2934, i64 0, i32 5, i32 0, i32 0), align 2
   %i3569 = lshr i80 %i3568, 57
   %i3570 = trunc i80 %i3569 to i64
   call fastcc void @transparent_crc(i64 %i3570, ptr @.str.1852, i32 signext undef)
-  %i3571 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2934, i64 0, i32 5, i32 0, i32 0), align 2
+  %i3571 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2934, i64 0, i32 5, i32 0, i32 0), align 2
   %i3572 = shl i80 %i3571, 23
   %i3573 = ashr i80 %i3572, 64
   %i3574 = shl nsw i80 %i3573, 32
   %i3575 = trunc i80 %i3574 to i64
   %i3576 = ashr exact i64 %i3575, 32
   call fastcc void @transparent_crc(i64 %i3576, ptr @.str.1853, i32 signext undef)
-  %i3577 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2934, i64 0, i32 5, i32 0, i32 0), align 2
+  %i3577 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2934, i64 0, i32 5, i32 0, i32 0), align 2
   %i3578 = shl i80 %i3577, 39
   %i3579 = ashr i80 %i3578, 62
   %i3580 = shl nsw i80 %i3579, 32
   %i3581 = trunc i80 %i3580 to i64
   %i3582 = ashr exact i64 %i3581, 32
   call fastcc void @transparent_crc(i64 %i3582, ptr @.str.1854, i32 signext undef)
-  %i3583 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2934, i64 0, i32 5, i32 0, i32 0), align 2
+  %i3583 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2934, i64 0, i32 5, i32 0, i32 0), align 2
   %i3584 = shl i80 %i3583, 57
   %i3585 = ashr i80 %i3584, 58
   %i3586 = shl nsw i80 %i3585, 32
   %i3587 = trunc i80 %i3586 to i64
   %i3588 = ashr exact i64 %i3587, 32
   call fastcc void @transparent_crc(i64 %i3588, ptr @.str.1855, i32 signext undef)
-  %i3589 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2934, i64 0, i32 5, i32 1), align 2
+  %i3589 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2934, i64 0, i32 5, i32 1), align 2
   %i3590 = lshr i80 %i3589, 49
   %i3591 = trunc i80 %i3590 to i64
   call fastcc void @transparent_crc(i64 %i3591, ptr @.str.1856, i32 signext undef)
-  %i3592 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2934, i64 0, i32 5, i32 1), align 2
+  %i3592 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2934, i64 0, i32 5, i32 1), align 2
   %i3593 = lshr i80 %i3592, 24
   %i3594 = trunc i80 %i3593 to i64
   %i3595 = and i64 %i3594, 33554431
   call fastcc void @transparent_crc(i64 %i3595, ptr @.str.1857, i32 signext undef)
-  %i3596 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2934, i64 0, i32 5, i32 1), align 2
+  %i3596 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2934, i64 0, i32 5, i32 1), align 2
   %i3597 = shl i80 %i3596, 56
   %i3598 = ashr i80 %i3597, 68
   %i3599 = shl nsw i80 %i3598, 32
   %i3600 = trunc i80 %i3599 to i64
   %i3601 = ashr exact i64 %i3600, 32
   call fastcc void @transparent_crc(i64 %i3601, ptr @.str.1858, i32 signext undef)
-  %i3602 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2934, i64 0, i32 5, i32 1), align 2
+  %i3602 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2934, i64 0, i32 5, i32 1), align 2
   %i3603 = lshr i80 %i3602, 11
   %i3604 = trunc i80 %i3603 to i64
   %i3605 = and i64 %i3604, 1
   call fastcc void @transparent_crc(i64 %i3605, ptr @.str.1859, i32 signext undef)
-  %i3606 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2934, i64 0, i32 5, i32 1), align 2
+  %i3606 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2934, i64 0, i32 5, i32 1), align 2
   %i3607 = shl i80 %i3606, 69
   %i3608 = ashr i80 %i3607, 72
   %i3609 = shl nsw i80 %i3608, 32
   %i3610 = trunc i80 %i3609 to i64
   %i3611 = ashr exact i64 %i3610, 32
   call fastcc void @transparent_crc(i64 %i3611, ptr @.str.1860, i32 signext undef)
-  %i3612 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2934, i64 0, i32 6), align 2, !tbaa !49
+  %i3612 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2934, i64 0, i32 6), align 2, !tbaa !49
   %i3613 = sext i16 %i3612 to i64
   call fastcc void @transparent_crc(i64 %i3613, ptr @.str.1861, i32 signext undef)
-  %i3614 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2934, i64 0, i32 7), align 2, !tbaa !50
+  %i3614 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2934, i64 0, i32 7), align 2, !tbaa !50
   %i3615 = zext i16 %i3614 to i64
   call fastcc void @transparent_crc(i64 %i3615, ptr @.str.1862, i32 signext undef)
   %i3616 = load i16, ptr undef, align 2, !tbaa !23
@@ -8054,7 +8054,7 @@ bb25:                                             ; preds = %bb15
   %i3641 = and i64 %i3640, 4194303
   call fastcc void @transparent_crc(i64 %i3641, ptr @.str.1870, i32 signext undef)
   call fastcc void @transparent_crc(i64 undef, ptr @.str.1876, i32 signext undef)
-  %i3642 = getelementptr inbounds [2 x [1 x [8 x %5]]], ptr bitcast (<{ <{ <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }> }>, <{ <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }> }> }>* @g_2935 to ptr), i64 0, i64 0, i64 0, i64 0, i32 4, i32 0
+  %i3642 = getelementptr inbounds [2 x [1 x [8 x %5]]], ptr @g_2935, i64 0, i64 0, i64 0, i64 0, i32 4, i32 0
   %i3643 = load volatile i128, ptr %i3642, align 2
   %i3644 = ashr i128 %i3643, 99
   %i3645 = shl nsw i128 %i3644, 32
@@ -8114,237 +8114,237 @@ bb25:                                             ; preds = %bb15
   call fastcc void @transparent_crc(i64 %i3690, ptr @.str.1885, i32 signext undef)
   %i3691 = load volatile i80, ptr undef, align 2
   call fastcc void @transparent_crc(i64 undef, ptr @.str.1921, i32 signext undef)
-  %i3692 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2936, i64 0, i32 5, i32 1), align 2
+  %i3692 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2936, i64 0, i32 5, i32 1), align 2
   %i3693 = shl i80 %i3692, 69
   %i3694 = ashr i80 %i3693, 72
   %i3695 = shl nsw i80 %i3694, 32
   %i3696 = trunc i80 %i3695 to i64
   %i3697 = ashr exact i64 %i3696, 32
   call fastcc void @transparent_crc(i64 %i3697, ptr @.str.1922, i32 signext undef)
-  %i3698 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2936, i64 0, i32 6), align 2, !tbaa !49
+  %i3698 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2936, i64 0, i32 6), align 2, !tbaa !49
   %i3699 = sext i16 %i3698 to i64
   call fastcc void @transparent_crc(i64 %i3699, ptr @.str.1923, i32 signext undef)
-  %i3700 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2936, i64 0, i32 7), align 2, !tbaa !50
+  %i3700 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2936, i64 0, i32 7), align 2, !tbaa !50
   %i3701 = zext i16 %i3700 to i64
   call fastcc void @transparent_crc(i64 %i3701, ptr @.str.1924, i32 signext undef)
-  %i3702 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2937, i64 0, i32 0), align 2, !tbaa !23
+  %i3702 = load i16, ptr @g_2937, align 2, !tbaa !23
   %i3703 = sext i16 %i3702 to i64
   call fastcc void @transparent_crc(i64 %i3703, ptr @.str.1925, i32 signext undef)
-  %i3704 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2937, i64 0, i32 1), align 2, !tbaa !51
+  %i3704 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2937, i64 0, i32 1), align 2, !tbaa !51
   %i3705 = sext i8 %i3704 to i64
   call fastcc void @transparent_crc(i64 %i3705, ptr @.str.1926, i32 signext undef)
-  %i3706 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2937, i64 0, i32 2, i32 0), align 1
+  %i3706 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2937, i64 0, i32 2, i32 0), align 1
   %i3707 = lshr i120 %i3706, 107
   %i3708 = trunc i120 %i3707 to i64
   call fastcc void @transparent_crc(i64 %i3708, ptr @.str.1927, i32 signext undef)
-  %i3709 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2937, i64 0, i32 2, i32 0), align 1
+  %i3709 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2937, i64 0, i32 2, i32 0), align 1
   %i3710 = lshr i120 %i3709, 78
   %i3711 = trunc i120 %i3710 to i64
   %i3712 = and i64 %i3711, 536870911
   call fastcc void @transparent_crc(i64 %i3712, ptr @.str.1928, i32 signext undef)
-  %i3713 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2937, i64 0, i32 2, i32 0), align 1
+  %i3713 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2937, i64 0, i32 2, i32 0), align 1
   %i3714 = shl i120 %i3713, 42
   %i3715 = ashr i120 %i3714, 104
   %i3716 = shl nsw i120 %i3715, 32
   %i3717 = trunc i120 %i3716 to i64
   %i3718 = ashr exact i64 %i3717, 32
   call fastcc void @transparent_crc(i64 %i3718, ptr @.str.1929, i32 signext undef)
-  %i3719 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2937, i64 0, i32 2, i32 0), align 1
+  %i3719 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2937, i64 0, i32 2, i32 0), align 1
   %i3720 = shl i120 %i3719, 58
   %i3721 = ashr i120 %i3720, 105
   %i3722 = shl nsw i120 %i3721, 32
   %i3723 = trunc i120 %i3722 to i64
   %i3724 = ashr exact i64 %i3723, 32
   call fastcc void @transparent_crc(i64 %i3724, ptr @.str.1930, i32 signext undef)
-  %i3725 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2937, i64 0, i32 2, i32 0), align 1
+  %i3725 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2937, i64 0, i32 2, i32 0), align 1
   %i3726 = lshr i120 %i3725, 41
   %i3727 = trunc i120 %i3726 to i64
   %i3728 = and i64 %i3727, 63
   call fastcc void @transparent_crc(i64 %i3728, ptr @.str.1931, i32 signext undef)
-  %i3729 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2937, i64 0, i32 2, i32 0), align 1
+  %i3729 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2937, i64 0, i32 2, i32 0), align 1
   %i3730 = lshr i120 %i3729, 19
   %i3731 = trunc i120 %i3730 to i64
   %i3732 = and i64 %i3731, 4194303
   call fastcc void @transparent_crc(i64 %i3732, ptr @.str.1932, i32 signext undef)
-  %i3733 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2937, i64 0, i32 2, i32 0), align 1
+  %i3733 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2937, i64 0, i32 2, i32 0), align 1
   %i3734 = shl i120 %i3733, 101
   %i3735 = ashr exact i120 %i3734, 69
   %i3736 = trunc i120 %i3735 to i64
   %i3737 = ashr exact i64 %i3736, 32
   call fastcc void @transparent_crc(i64 %i3737, ptr @.str.1933, i32 signext undef)
-  %i3738 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2937, i64 0, i32 3, i32 0), align 2, !tbaa !44
+  %i3738 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2937, i64 0, i32 3, i32 0), align 2, !tbaa !44
   %i3739 = zext i8 %i3738 to i64
   call fastcc void @transparent_crc(i64 %i3739, ptr @.str.1934, i32 signext undef)
-  %i3740 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2937, i64 0, i32 3, i32 1), align 1, !tbaa !45
+  %i3740 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2937, i64 0, i32 3, i32 1), align 1, !tbaa !45
   %i3741 = sext i8 %i3740 to i64
   call fastcc void @transparent_crc(i64 %i3741, ptr @.str.1935, i32 signext undef)
-  %i3742 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2937, i64 0, i32 3, i32 2), align 2, !tbaa !46
+  %i3742 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2937, i64 0, i32 3, i32 2), align 2, !tbaa !46
   %i3743 = sext i16 %i3742 to i64
   call fastcc void @transparent_crc(i64 %i3743, ptr @.str.1936, i32 signext undef)
-  %i3744 = load i64, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2937, i64 0, i32 3, i32 3), align 2, !tbaa !47
+  %i3744 = load i64, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2937, i64 0, i32 3, i32 3), align 2, !tbaa !47
   call fastcc void @transparent_crc(i64 %i3744, ptr @.str.1937, i32 signext undef)
   call fastcc void @transparent_crc(i64 undef, ptr @.str.1953, i32 signext undef)
-  %i3745 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2937, i64 0, i32 6), align 2, !tbaa !49
+  %i3745 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2937, i64 0, i32 6), align 2, !tbaa !49
   %i3746 = sext i16 %i3745 to i64
   call fastcc void @transparent_crc(i64 %i3746, ptr @.str.1954, i32 signext undef)
-  %i3747 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2937, i64 0, i32 7), align 2, !tbaa !50
+  %i3747 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2937, i64 0, i32 7), align 2, !tbaa !50
   %i3748 = zext i16 %i3747 to i64
   call fastcc void @transparent_crc(i64 %i3748, ptr @.str.1955, i32 signext undef)
-  %i3749 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2938, i64 0, i32 0), align 2, !tbaa !23
+  %i3749 = load i16, ptr @g_2938, align 2, !tbaa !23
   %i3750 = sext i16 %i3749 to i64
   call fastcc void @transparent_crc(i64 %i3750, ptr @.str.1956, i32 signext undef)
-  %i3751 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2938, i64 0, i32 1), align 2, !tbaa !51
+  %i3751 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2938, i64 0, i32 1), align 2, !tbaa !51
   %i3752 = sext i8 %i3751 to i64
   call fastcc void @transparent_crc(i64 %i3752, ptr @.str.1957, i32 signext undef)
-  %i3753 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2938, i64 0, i32 2, i32 0), align 1
+  %i3753 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2938, i64 0, i32 2, i32 0), align 1
   %i3754 = lshr i120 %i3753, 107
   %i3755 = trunc i120 %i3754 to i64
   call fastcc void @transparent_crc(i64 %i3755, ptr @.str.1958, i32 signext undef)
-  %i3756 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2938, i64 0, i32 2, i32 0), align 1
+  %i3756 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2938, i64 0, i32 2, i32 0), align 1
   %i3757 = lshr i120 %i3756, 78
   %i3758 = trunc i120 %i3757 to i64
   %i3759 = and i64 %i3758, 536870911
   call fastcc void @transparent_crc(i64 %i3759, ptr @.str.1959, i32 signext undef)
-  %i3760 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2938, i64 0, i32 2, i32 0), align 1
+  %i3760 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2938, i64 0, i32 2, i32 0), align 1
   %i3761 = shl i120 %i3760, 42
   %i3762 = ashr i120 %i3761, 104
   %i3763 = shl nsw i120 %i3762, 32
   %i3764 = trunc i120 %i3763 to i64
   %i3765 = ashr exact i64 %i3764, 32
   call fastcc void @transparent_crc(i64 %i3765, ptr @.str.1960, i32 signext undef)
-  %i3766 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2938, i64 0, i32 2, i32 0), align 1
+  %i3766 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2938, i64 0, i32 2, i32 0), align 1
   %i3767 = shl i120 %i3766, 58
   %i3768 = ashr i120 %i3767, 105
   %i3769 = shl nsw i120 %i3768, 32
   %i3770 = trunc i120 %i3769 to i64
   %i3771 = ashr exact i64 %i3770, 32
   call fastcc void @transparent_crc(i64 %i3771, ptr @.str.1961, i32 signext undef)
-  %i3772 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2938, i64 0, i32 2, i32 0), align 1
+  %i3772 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2938, i64 0, i32 2, i32 0), align 1
   %i3773 = lshr i120 %i3772, 41
   %i3774 = trunc i120 %i3773 to i64
   %i3775 = and i64 %i3774, 63
   call fastcc void @transparent_crc(i64 %i3775, ptr @.str.1962, i32 signext undef)
-  %i3776 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2938, i64 0, i32 2, i32 0), align 1
+  %i3776 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2938, i64 0, i32 2, i32 0), align 1
   %i3777 = lshr i120 %i3776, 19
   %i3778 = trunc i120 %i3777 to i64
   %i3779 = and i64 %i3778, 4194303
   call fastcc void @transparent_crc(i64 %i3779, ptr @.str.1963, i32 signext undef)
-  %i3780 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2938, i64 0, i32 2, i32 0), align 1
+  %i3780 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2938, i64 0, i32 2, i32 0), align 1
   %i3781 = shl i120 %i3780, 101
   %i3782 = ashr exact i120 %i3781, 69
   %i3783 = trunc i120 %i3782 to i64
   %i3784 = ashr exact i64 %i3783, 32
   call fastcc void @transparent_crc(i64 %i3784, ptr @.str.1964, i32 signext undef)
-  %i3785 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2938, i64 0, i32 3, i32 0), align 2, !tbaa !44
+  %i3785 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2938, i64 0, i32 3, i32 0), align 2, !tbaa !44
   %i3786 = zext i8 %i3785 to i64
   call fastcc void @transparent_crc(i64 %i3786, ptr @.str.1965, i32 signext undef)
-  %i3787 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2938, i64 0, i32 3, i32 1), align 1, !tbaa !45
+  %i3787 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2938, i64 0, i32 3, i32 1), align 1, !tbaa !45
   %i3788 = sext i8 %i3787 to i64
   call fastcc void @transparent_crc(i64 %i3788, ptr @.str.1966, i32 signext undef)
-  %i3789 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2938, i64 0, i32 3, i32 2), align 2, !tbaa !46
+  %i3789 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2938, i64 0, i32 3, i32 2), align 2, !tbaa !46
   %i3790 = sext i16 %i3789 to i64
   call fastcc void @transparent_crc(i64 %i3790, ptr @.str.1967, i32 signext undef)
-  %i3791 = load i64, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2938, i64 0, i32 3, i32 3), align 2, !tbaa !47
+  %i3791 = load i64, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2938, i64 0, i32 3, i32 3), align 2, !tbaa !47
   call fastcc void @transparent_crc(i64 %i3791, ptr @.str.1968, i32 signext undef)
-  %i3792 = load i32, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2938, i64 0, i32 3, i32 4), align 2, !tbaa !48
+  %i3792 = load i32, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2938, i64 0, i32 3, i32 4), align 2, !tbaa !48
   %i3793 = sext i32 %i3792 to i64
   call fastcc void @transparent_crc(i64 %i3793, ptr @.str.1969, i32 signext undef)
-  %i3794 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2938, i64 0, i32 4, i32 0), align 2
+  %i3794 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2938, i64 0, i32 4, i32 0), align 2
   %i3795 = ashr i128 %i3794, 99
   %i3796 = shl nsw i128 %i3795, 32
   %i3797 = trunc i128 %i3796 to i64
   %i3798 = ashr exact i64 %i3797, 32
   call fastcc void @transparent_crc(i64 %i3798, ptr @.str.1970, i32 signext undef)
-  %i3799 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2938, i64 0, i32 4, i32 0), align 2
+  %i3799 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2938, i64 0, i32 4, i32 0), align 2
   %i3800 = shl i128 %i3799, 29
   %i3801 = ashr i128 %i3800, 97
   %i3802 = shl nsw i128 %i3801, 32
   %i3803 = trunc i128 %i3802 to i64
   %i3804 = ashr exact i64 %i3803, 32
   call fastcc void @transparent_crc(i64 %i3804, ptr @.str.1971, i32 signext undef)
-  %i3805 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2938, i64 0, i32 4, i32 0), align 2
+  %i3805 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2938, i64 0, i32 4, i32 0), align 2
   %i3806 = shl i128 %i3805, 60
   %i3807 = ashr i128 %i3806, 108
   %i3808 = shl nsw i128 %i3807, 32
   %i3809 = trunc i128 %i3808 to i64
   %i3810 = ashr exact i64 %i3809, 32
   call fastcc void @transparent_crc(i64 %i3810, ptr @.str.1972, i32 signext undef)
-  %i3811 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2938, i64 0, i32 4, i32 0), align 2
+  %i3811 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2938, i64 0, i32 4, i32 0), align 2
   %i3812 = shl i128 %i3811, 80
   %i3813 = ashr i128 %i3812, 110
   %i3814 = shl nsw i128 %i3813, 32
   %i3815 = trunc i128 %i3814 to i64
   %i3816 = ashr exact i64 %i3815, 32
   call fastcc void @transparent_crc(i64 %i3816, ptr @.str.1973, i32 signext undef)
-  %i3817 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2938, i64 0, i32 4, i32 0), align 2
+  %i3817 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2938, i64 0, i32 4, i32 0), align 2
   %i3818 = lshr i128 %i3817, 28
   %i3819 = trunc i128 %i3818 to i64
   %i3820 = and i64 %i3819, 3
   call fastcc void @transparent_crc(i64 %i3820, ptr @.str.1974, i32 signext undef)
-  %i3821 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2938, i64 0, i32 4, i32 0), align 2
+  %i3821 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2938, i64 0, i32 4, i32 0), align 2
   %i3822 = shl i128 %i3821, 100
   %i3823 = ashr i128 %i3822, 107
   %i3824 = shl nsw i128 %i3823, 32
   %i3825 = trunc i128 %i3824 to i64
   %i3826 = ashr exact i64 %i3825, 32
   call fastcc void @transparent_crc(i64 %i3826, ptr @.str.1975, i32 signext undef)
-  %i3827 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2938, i64 0, i32 5, i32 0, i32 0), align 2
+  %i3827 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2938, i64 0, i32 5, i32 0, i32 0), align 2
   %i3828 = lshr i80 %i3827, 57
   %i3829 = trunc i80 %i3828 to i64
   call fastcc void @transparent_crc(i64 %i3829, ptr @.str.1976, i32 signext undef)
-  %i3830 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2938, i64 0, i32 5, i32 0, i32 0), align 2
+  %i3830 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2938, i64 0, i32 5, i32 0, i32 0), align 2
   %i3831 = shl i80 %i3830, 23
   %i3832 = ashr i80 %i3831, 64
   %i3833 = shl nsw i80 %i3832, 32
   %i3834 = trunc i80 %i3833 to i64
   %i3835 = ashr exact i64 %i3834, 32
   call fastcc void @transparent_crc(i64 %i3835, ptr @.str.1977, i32 signext undef)
-  %i3836 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2938, i64 0, i32 5, i32 0, i32 0), align 2
+  %i3836 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2938, i64 0, i32 5, i32 0, i32 0), align 2
   %i3837 = shl i80 %i3836, 39
   %i3838 = ashr i80 %i3837, 62
   %i3839 = shl nsw i80 %i3838, 32
   %i3840 = trunc i80 %i3839 to i64
   %i3841 = ashr exact i64 %i3840, 32
   call fastcc void @transparent_crc(i64 %i3841, ptr @.str.1978, i32 signext undef)
-  %i3842 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2938, i64 0, i32 5, i32 0, i32 0), align 2
+  %i3842 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2938, i64 0, i32 5, i32 0, i32 0), align 2
   %i3843 = shl i80 %i3842, 57
   %i3844 = ashr i80 %i3843, 58
   %i3845 = shl nsw i80 %i3844, 32
   %i3846 = trunc i80 %i3845 to i64
   %i3847 = ashr exact i64 %i3846, 32
   call fastcc void @transparent_crc(i64 %i3847, ptr @.str.1979, i32 signext undef)
-  %i3848 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2938, i64 0, i32 5, i32 1), align 2
+  %i3848 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2938, i64 0, i32 5, i32 1), align 2
   %i3849 = lshr i80 %i3848, 49
   %i3850 = trunc i80 %i3849 to i64
   call fastcc void @transparent_crc(i64 %i3850, ptr @.str.1980, i32 signext undef)
-  %i3851 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2938, i64 0, i32 5, i32 1), align 2
+  %i3851 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2938, i64 0, i32 5, i32 1), align 2
   %i3852 = lshr i80 %i3851, 24
   %i3853 = trunc i80 %i3852 to i64
   %i3854 = and i64 %i3853, 33554431
   call fastcc void @transparent_crc(i64 %i3854, ptr @.str.1981, i32 signext undef)
-  %i3855 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2938, i64 0, i32 5, i32 1), align 2
+  %i3855 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2938, i64 0, i32 5, i32 1), align 2
   %i3856 = shl i80 %i3855, 56
   %i3857 = ashr i80 %i3856, 68
   %i3858 = shl nsw i80 %i3857, 32
   %i3859 = trunc i80 %i3858 to i64
   %i3860 = ashr exact i64 %i3859, 32
   call fastcc void @transparent_crc(i64 %i3860, ptr @.str.1982, i32 signext undef)
-  %i3861 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2938, i64 0, i32 5, i32 1), align 2
+  %i3861 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2938, i64 0, i32 5, i32 1), align 2
   %i3862 = lshr i80 %i3861, 11
   %i3863 = trunc i80 %i3862 to i64
   %i3864 = and i64 %i3863, 1
   call fastcc void @transparent_crc(i64 %i3864, ptr @.str.1983, i32 signext undef)
-  %i3865 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2938, i64 0, i32 5, i32 1), align 2
+  %i3865 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2938, i64 0, i32 5, i32 1), align 2
   %i3866 = shl i80 %i3865, 69
   %i3867 = ashr i80 %i3866, 72
   %i3868 = shl nsw i80 %i3867, 32
   %i3869 = trunc i80 %i3868 to i64
   %i3870 = ashr exact i64 %i3869, 32
   call fastcc void @transparent_crc(i64 %i3870, ptr @.str.1984, i32 signext undef)
-  %i3871 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2938, i64 0, i32 6), align 2, !tbaa !49
+  %i3871 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2938, i64 0, i32 6), align 2, !tbaa !49
   %i3872 = sext i16 %i3871 to i64
   call fastcc void @transparent_crc(i64 %i3872, ptr @.str.1985, i32 signext undef)
-  %i3873 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2938, i64 0, i32 7), align 2, !tbaa !50
+  %i3873 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2938, i64 0, i32 7), align 2, !tbaa !50
   %i3874 = zext i16 %i3873 to i64
   call fastcc void @transparent_crc(i64 %i3874, ptr @.str.1986, i32 signext undef)
   %i3875 = load i16, ptr undef, align 2, !tbaa !23
@@ -8378,7 +8378,7 @@ bb25:                                             ; preds = %bb15
   %i3898 = trunc i128 %i3897 to i64
   %i3899 = ashr exact i64 %i3898, 32
   call fastcc void @transparent_crc(i64 %i3899, ptr @.str.2006, i32 signext undef)
-  %i3900 = getelementptr inbounds [10 x %5], ptr bitcast (<{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>* @g_2939 to ptr), i64 0, i64 0, i32 5
+  %i3900 = getelementptr inbounds [10 x %5], ptr @g_2939, i64 0, i64 0, i32 5
   %i3902 = load volatile i80, ptr %i3900, align 2
   %i3903 = lshr i80 %i3902, 57
   %i3904 = trunc i80 %i3903 to i64
@@ -8397,216 +8397,216 @@ bb25:                                             ; preds = %bb15
   %i3915 = trunc i80 %i3914 to i64
   %i3916 = ashr exact i64 %i3915, 32
   call fastcc void @transparent_crc(i64 %i3916, ptr @.str.2009, i32 signext undef)
-  %i3917 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2940, i64 0, i32 3, i32 1), align 1, !tbaa !45
+  %i3917 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2940, i64 0, i32 3, i32 1), align 1, !tbaa !45
   %i3918 = sext i8 %i3917 to i64
   call fastcc void @transparent_crc(i64 %i3918, ptr @.str.2028, i32 signext undef)
-  %i3919 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2940, i64 0, i32 3, i32 2), align 2, !tbaa !46
+  %i3919 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2940, i64 0, i32 3, i32 2), align 2, !tbaa !46
   %i3920 = sext i16 %i3919 to i64
   call fastcc void @transparent_crc(i64 %i3920, ptr @.str.2029, i32 signext undef)
-  %i3921 = load i64, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2940, i64 0, i32 3, i32 3), align 2, !tbaa !47
+  %i3921 = load i64, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2940, i64 0, i32 3, i32 3), align 2, !tbaa !47
   call fastcc void @transparent_crc(i64 %i3921, ptr @.str.2030, i32 signext undef)
-  %i3922 = load i32, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2940, i64 0, i32 3, i32 4), align 2, !tbaa !48
+  %i3922 = load i32, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2940, i64 0, i32 3, i32 4), align 2, !tbaa !48
   %i3923 = sext i32 %i3922 to i64
   call fastcc void @transparent_crc(i64 %i3923, ptr @.str.2031, i32 signext undef)
-  %i3924 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2940, i64 0, i32 4, i32 0), align 2
+  %i3924 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2940, i64 0, i32 4, i32 0), align 2
   %i3925 = ashr i128 %i3924, 99
   %i3926 = shl nsw i128 %i3925, 32
   %i3927 = trunc i128 %i3926 to i64
   %i3928 = ashr exact i64 %i3927, 32
   call fastcc void @transparent_crc(i64 %i3928, ptr @.str.2032, i32 signext undef)
-  %i3929 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2940, i64 0, i32 4, i32 0), align 2
+  %i3929 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2940, i64 0, i32 4, i32 0), align 2
   %i3930 = shl i128 %i3929, 29
   %i3931 = ashr i128 %i3930, 97
   %i3932 = shl nsw i128 %i3931, 32
   %i3933 = trunc i128 %i3932 to i64
   %i3934 = ashr exact i64 %i3933, 32
   call fastcc void @transparent_crc(i64 %i3934, ptr @.str.2033, i32 signext undef)
-  %i3935 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2940, i64 0, i32 4, i32 0), align 2
+  %i3935 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2940, i64 0, i32 4, i32 0), align 2
   %i3936 = shl i128 %i3935, 60
   %i3937 = ashr i128 %i3936, 108
   %i3938 = shl nsw i128 %i3937, 32
   %i3939 = trunc i128 %i3938 to i64
   %i3940 = ashr exact i64 %i3939, 32
   call fastcc void @transparent_crc(i64 %i3940, ptr @.str.2034, i32 signext undef)
-  %i3941 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2940, i64 0, i32 4, i32 0), align 2
+  %i3941 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2940, i64 0, i32 4, i32 0), align 2
   %i3942 = shl i128 %i3941, 80
   %i3943 = ashr i128 %i3942, 110
   %i3944 = shl nsw i128 %i3943, 32
   %i3945 = trunc i128 %i3944 to i64
   %i3946 = ashr exact i64 %i3945, 32
   call fastcc void @transparent_crc(i64 %i3946, ptr @.str.2035, i32 signext undef)
-  %i3947 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2940, i64 0, i32 4, i32 0), align 2
+  %i3947 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2940, i64 0, i32 4, i32 0), align 2
   %i3948 = lshr i128 %i3947, 28
   %i3949 = trunc i128 %i3948 to i64
   %i3950 = and i64 %i3949, 3
   call fastcc void @transparent_crc(i64 %i3950, ptr @.str.2036, i32 signext undef)
-  %i3951 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2940, i64 0, i32 4, i32 0), align 2
+  %i3951 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2940, i64 0, i32 4, i32 0), align 2
   %i3952 = shl i128 %i3951, 100
   %i3953 = ashr i128 %i3952, 107
   %i3954 = shl nsw i128 %i3953, 32
   %i3955 = trunc i128 %i3954 to i64
   %i3956 = ashr exact i64 %i3955, 32
   call fastcc void @transparent_crc(i64 %i3956, ptr @.str.2037, i32 signext undef)
-  %i3957 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2940, i64 0, i32 5, i32 0, i32 0), align 2
+  %i3957 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2940, i64 0, i32 5, i32 0, i32 0), align 2
   %i3958 = lshr i80 %i3957, 57
   %i3959 = trunc i80 %i3958 to i64
   call fastcc void @transparent_crc(i64 %i3959, ptr @.str.2038, i32 signext undef)
-  %i3960 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2940, i64 0, i32 5, i32 0, i32 0), align 2
+  %i3960 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2940, i64 0, i32 5, i32 0, i32 0), align 2
   %i3961 = shl i80 %i3960, 23
   %i3962 = ashr i80 %i3961, 64
   %i3963 = shl nsw i80 %i3962, 32
   %i3964 = trunc i80 %i3963 to i64
   %i3965 = ashr exact i64 %i3964, 32
   call fastcc void @transparent_crc(i64 %i3965, ptr @.str.2039, i32 signext undef)
-  %i3966 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2940, i64 0, i32 5, i32 0, i32 0), align 2
+  %i3966 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2940, i64 0, i32 5, i32 0, i32 0), align 2
   %i3967 = shl i80 %i3966, 39
   %i3968 = ashr i80 %i3967, 62
   %i3969 = shl nsw i80 %i3968, 32
   %i3970 = trunc i80 %i3969 to i64
   %i3971 = ashr exact i64 %i3970, 32
   call fastcc void @transparent_crc(i64 %i3971, ptr @.str.2040, i32 signext undef)
-  %i3972 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2940, i64 0, i32 5, i32 0, i32 0), align 2
+  %i3972 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2940, i64 0, i32 5, i32 0, i32 0), align 2
   %i3973 = shl i80 %i3972, 57
   %i3974 = ashr i80 %i3973, 58
   %i3975 = shl nsw i80 %i3974, 32
   %i3976 = trunc i80 %i3975 to i64
   %i3977 = ashr exact i64 %i3976, 32
   call fastcc void @transparent_crc(i64 %i3977, ptr @.str.2041, i32 signext undef)
-  %i3978 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2940, i64 0, i32 5, i32 1), align 2
+  %i3978 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2940, i64 0, i32 5, i32 1), align 2
   %i3979 = lshr i80 %i3978, 49
   %i3980 = trunc i80 %i3979 to i64
   call fastcc void @transparent_crc(i64 %i3980, ptr @.str.2042, i32 signext undef)
-  %i3981 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2940, i64 0, i32 5, i32 1), align 2
+  %i3981 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2940, i64 0, i32 5, i32 1), align 2
   %i3982 = lshr i80 %i3981, 24
   %i3983 = trunc i80 %i3982 to i64
   %i3984 = and i64 %i3983, 33554431
   call fastcc void @transparent_crc(i64 %i3984, ptr @.str.2043, i32 signext undef)
-  %i3985 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2940, i64 0, i32 5, i32 1), align 2
+  %i3985 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2940, i64 0, i32 5, i32 1), align 2
   %i3986 = shl i80 %i3985, 56
   %i3987 = ashr i80 %i3986, 68
   %i3988 = shl nsw i80 %i3987, 32
   %i3989 = trunc i80 %i3988 to i64
   %i3990 = ashr exact i64 %i3989, 32
   call fastcc void @transparent_crc(i64 %i3990, ptr @.str.2044, i32 signext undef)
-  %i3991 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2940, i64 0, i32 5, i32 1), align 2
+  %i3991 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2940, i64 0, i32 5, i32 1), align 2
   %i3992 = lshr i80 %i3991, 11
   %i3993 = trunc i80 %i3992 to i64
   %i3994 = and i64 %i3993, 1
   call fastcc void @transparent_crc(i64 %i3994, ptr @.str.2045, i32 signext undef)
-  %i3995 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2940, i64 0, i32 5, i32 1), align 2
+  %i3995 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2940, i64 0, i32 5, i32 1), align 2
   %i3996 = shl i80 %i3995, 69
   %i3997 = ashr i80 %i3996, 72
   %i3998 = shl nsw i80 %i3997, 32
   %i3999 = trunc i80 %i3998 to i64
   %i4000 = ashr exact i64 %i3999, 32
   call fastcc void @transparent_crc(i64 %i4000, ptr @.str.2046, i32 signext undef)
-  %i4001 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2940, i64 0, i32 6), align 2, !tbaa !49
+  %i4001 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2940, i64 0, i32 6), align 2, !tbaa !49
   %i4002 = sext i16 %i4001 to i64
   call fastcc void @transparent_crc(i64 %i4002, ptr @.str.2047, i32 signext undef)
-  %i4003 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2940, i64 0, i32 7), align 2, !tbaa !50
+  %i4003 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2940, i64 0, i32 7), align 2, !tbaa !50
   %i4004 = zext i16 %i4003 to i64
   call fastcc void @transparent_crc(i64 %i4004, ptr @.str.2048, i32 signext undef)
-  %i4005 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2941, i64 0, i32 0), align 2, !tbaa !23
+  %i4005 = load i16, ptr @g_2941, align 2, !tbaa !23
   %i4006 = sext i16 %i4005 to i64
   call fastcc void @transparent_crc(i64 %i4006, ptr @.str.2049, i32 signext undef)
-  %i4007 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2941, i64 0, i32 1), align 2, !tbaa !51
+  %i4007 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2941, i64 0, i32 1), align 2, !tbaa !51
   %i4008 = sext i8 %i4007 to i64
   call fastcc void @transparent_crc(i64 %i4008, ptr @.str.2050, i32 signext undef)
-  %i4009 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2941, i64 0, i32 2, i32 0), align 1
+  %i4009 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2941, i64 0, i32 2, i32 0), align 1
   %i4010 = lshr i120 %i4009, 107
   %i4011 = trunc i120 %i4010 to i64
   call fastcc void @transparent_crc(i64 %i4011, ptr @.str.2051, i32 signext undef)
-  %i4012 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2941, i64 0, i32 2, i32 0), align 1
+  %i4012 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2941, i64 0, i32 2, i32 0), align 1
   %i4013 = lshr i120 %i4012, 78
   %i4014 = trunc i120 %i4013 to i64
   %i4015 = and i64 %i4014, 536870911
   call fastcc void @transparent_crc(i64 %i4015, ptr @.str.2052, i32 signext undef)
-  %i4016 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2941, i64 0, i32 2, i32 0), align 1
+  %i4016 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2941, i64 0, i32 2, i32 0), align 1
   %i4017 = shl i120 %i4016, 42
   %i4018 = ashr i120 %i4017, 104
   %i4019 = shl nsw i120 %i4018, 32
   %i4020 = trunc i120 %i4019 to i64
   %i4021 = ashr exact i64 %i4020, 32
   call fastcc void @transparent_crc(i64 %i4021, ptr @.str.2053, i32 signext undef)
-  %i4022 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2941, i64 0, i32 2, i32 0), align 1
+  %i4022 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2941, i64 0, i32 2, i32 0), align 1
   %i4023 = shl i120 %i4022, 58
   %i4024 = ashr i120 %i4023, 105
   %i4025 = shl nsw i120 %i4024, 32
   %i4026 = trunc i120 %i4025 to i64
   %i4027 = ashr exact i64 %i4026, 32
   call fastcc void @transparent_crc(i64 %i4027, ptr @.str.2054, i32 signext undef)
-  %i4028 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2941, i64 0, i32 2, i32 0), align 1
+  %i4028 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2941, i64 0, i32 2, i32 0), align 1
   call fastcc void @transparent_crc(i64 undef, ptr @.str.2065, i32 signext undef)
   call fastcc void @transparent_crc(i64 0, ptr @.str.2066, i32 signext undef)
-  %i4029 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2941, i64 0, i32 4, i32 0), align 2
+  %i4029 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2941, i64 0, i32 4, i32 0), align 2
   %i4030 = lshr i128 %i4029, 28
   %i4031 = trunc i128 %i4030 to i64
   %i4032 = and i64 %i4031, 3
   call fastcc void @transparent_crc(i64 %i4032, ptr @.str.2067, i32 signext undef)
-  %i4033 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2941, i64 0, i32 4, i32 0), align 2
+  %i4033 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2941, i64 0, i32 4, i32 0), align 2
   %i4034 = shl i128 %i4033, 100
   %i4035 = ashr i128 %i4034, 107
   %i4036 = shl nsw i128 %i4035, 32
   %i4037 = trunc i128 %i4036 to i64
   %i4038 = ashr exact i64 %i4037, 32
   call fastcc void @transparent_crc(i64 %i4038, ptr @.str.2068, i32 signext undef)
-  %i4039 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2941, i64 0, i32 5, i32 0, i32 0), align 2
+  %i4039 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2941, i64 0, i32 5, i32 0, i32 0), align 2
   %i4040 = lshr i80 %i4039, 57
   %i4041 = trunc i80 %i4040 to i64
   call fastcc void @transparent_crc(i64 %i4041, ptr @.str.2069, i32 signext undef)
-  %i4042 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2941, i64 0, i32 5, i32 0, i32 0), align 2
+  %i4042 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2941, i64 0, i32 5, i32 0, i32 0), align 2
   %i4043 = shl i80 %i4042, 23
   %i4044 = ashr i80 %i4043, 64
   %i4045 = shl nsw i80 %i4044, 32
   %i4046 = trunc i80 %i4045 to i64
   %i4047 = ashr exact i64 %i4046, 32
   call fastcc void @transparent_crc(i64 %i4047, ptr @.str.2070, i32 signext undef)
-  %i4048 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2941, i64 0, i32 5, i32 0, i32 0), align 2
+  %i4048 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2941, i64 0, i32 5, i32 0, i32 0), align 2
   %i4049 = shl i80 %i4048, 39
   %i4050 = ashr i80 %i4049, 62
   %i4051 = shl nsw i80 %i4050, 32
   %i4052 = trunc i80 %i4051 to i64
   %i4053 = ashr exact i64 %i4052, 32
   call fastcc void @transparent_crc(i64 %i4053, ptr @.str.2071, i32 signext undef)
-  %i4054 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2941, i64 0, i32 5, i32 0, i32 0), align 2
+  %i4054 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2941, i64 0, i32 5, i32 0, i32 0), align 2
   %i4055 = shl i80 %i4054, 57
   %i4056 = ashr i80 %i4055, 58
   %i4057 = shl nsw i80 %i4056, 32
   %i4058 = trunc i80 %i4057 to i64
   %i4059 = ashr exact i64 %i4058, 32
   call fastcc void @transparent_crc(i64 %i4059, ptr @.str.2072, i32 signext undef)
-  %i4060 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2941, i64 0, i32 5, i32 1), align 2
+  %i4060 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2941, i64 0, i32 5, i32 1), align 2
   %i4061 = lshr i80 %i4060, 49
   %i4062 = trunc i80 %i4061 to i64
   call fastcc void @transparent_crc(i64 %i4062, ptr @.str.2073, i32 signext undef)
-  %i4063 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2941, i64 0, i32 5, i32 1), align 2
+  %i4063 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2941, i64 0, i32 5, i32 1), align 2
   %i4064 = lshr i80 %i4063, 24
   %i4065 = trunc i80 %i4064 to i64
   %i4066 = and i64 %i4065, 33554431
   call fastcc void @transparent_crc(i64 %i4066, ptr @.str.2074, i32 signext undef)
-  %i4067 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2941, i64 0, i32 5, i32 1), align 2
+  %i4067 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2941, i64 0, i32 5, i32 1), align 2
   %i4068 = shl i80 %i4067, 56
   %i4069 = ashr i80 %i4068, 68
   %i4070 = shl nsw i80 %i4069, 32
   %i4071 = trunc i80 %i4070 to i64
   %i4072 = ashr exact i64 %i4071, 32
   call fastcc void @transparent_crc(i64 %i4072, ptr @.str.2075, i32 signext undef)
-  %i4073 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2941, i64 0, i32 5, i32 1), align 2
+  %i4073 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2941, i64 0, i32 5, i32 1), align 2
   %i4074 = lshr i80 %i4073, 11
   %i4075 = trunc i80 %i4074 to i64
   %i4076 = and i64 %i4075, 1
   call fastcc void @transparent_crc(i64 %i4076, ptr @.str.2076, i32 signext undef)
-  %i4077 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2941, i64 0, i32 5, i32 1), align 2
+  %i4077 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2941, i64 0, i32 5, i32 1), align 2
   %i4078 = shl i80 %i4077, 69
   %i4079 = ashr i80 %i4078, 72
   %i4080 = shl nsw i80 %i4079, 32
   %i4081 = trunc i80 %i4080 to i64
   %i4082 = ashr exact i64 %i4081, 32
   call fastcc void @transparent_crc(i64 %i4082, ptr @.str.2077, i32 signext undef)
-  %i4083 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2941, i64 0, i32 6), align 2, !tbaa !49
+  %i4083 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2941, i64 0, i32 6), align 2, !tbaa !49
   %i4084 = sext i16 %i4083 to i64
   call fastcc void @transparent_crc(i64 %i4084, ptr @.str.2078, i32 signext undef)
-  %i4085 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2941, i64 0, i32 7), align 2, !tbaa !50
+  %i4085 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2941, i64 0, i32 7), align 2, !tbaa !50
   %i4086 = zext i16 %i4085 to i64
   call fastcc void @transparent_crc(i64 %i4086, ptr @.str.2079, i32 signext undef)
   %i4087 = load i16, ptr undef, align 2, !tbaa !23
@@ -8668,7 +8668,7 @@ bb25:                                             ; preds = %bb15
   %i4130 = load i32, ptr undef, align 2, !tbaa !48
   %i4131 = sext i32 %i4130 to i64
   call fastcc void @transparent_crc(i64 %i4131, ptr @.str.2093, i32 signext undef)
-  %i4132 = getelementptr inbounds [6 x [7 x [6 x %5]]], ptr bitcast (<{ <{ <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, 
i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, 
i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }> }>, <{ <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, 
%0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, 
i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, 
i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }> }>, <{ <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, 
i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, 
i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, 
i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }> }>, <{ <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, 
i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 
}, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> 
}>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }> }>, <{ <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, 
i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, 
i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, 
{ { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }> }>, <{ <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, 
i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ 
i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, 
i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }> }> }>* @g_2942 to ptr), i64 0, i64 0, i64 0, i64 0, i32 4, i32 0
+  %i4132 = getelementptr inbounds [6 x [7 x [6 x %5]]], ptr @g_2942, i64 0, i64 0, i64 0, i64 0, i32 4, i32 0
   %i4133 = load volatile i128, ptr %i4132, align 2
   %i4134 = ashr i128 %i4133, 99
   %i4135 = shl nsw i128 %i4134, 32
@@ -8718,7 +8718,7 @@ bb25:                                             ; preds = %bb15
   %i4171 = load i16, ptr undef, align 2, !tbaa !23
   %i4172 = sext i16 %i4171 to i64
   call fastcc void @transparent_crc(i64 %i4172, ptr @.str.2111, i32 signext undef)
-  %i4173 = getelementptr inbounds [6 x [10 x [4 x %5]]], ptr bitcast (<{ <{ <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, 
i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { 
i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }> }>, <{ <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, 
i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, 
i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { 
i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }> }>, <{ <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 
}, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, 
i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, 
i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }> }>, <{ <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, 
%0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, 
i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, 
i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }> }>, <{ <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, 
i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ 
<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, 
i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }> }>, <{ <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, 
i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, 
i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }> }> }>* @g_2943 to ptr), i64 0, i64 0, i64 0, i64 0, i32 1
+  %i4173 = getelementptr inbounds [6 x [10 x [4 x %5]]], ptr @g_2943, i64 0, i64 0, i64 0, i64 0, i32 1
   %i4174 = load i8, ptr %i4173, align 2, !tbaa !51
   %i4175 = sext i8 %i4174 to i64
   call fastcc void @transparent_crc(i64 %i4175, ptr @.str.2112, i32 signext undef)
@@ -8800,289 +8800,289 @@ bb25:                                             ; preds = %bb15
   %i4236 = load i16, ptr undef, align 2, !tbaa !50
   %i4237 = zext i16 %i4236 to i64
   call fastcc void @transparent_crc(i64 %i4237, ptr @.str.2141, i32 signext undef)
-  %i4238 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2944, i64 0, i32 0), align 2, !tbaa !23
+  %i4238 = load i16, ptr @g_2944, align 2, !tbaa !23
   %i4239 = sext i16 %i4238 to i64
   call fastcc void @transparent_crc(i64 %i4239, ptr @.str.2142, i32 signext undef)
-  %i4240 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2944, i64 0, i32 1), align 2, !tbaa !51
+  %i4240 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2944, i64 0, i32 1), align 2, !tbaa !51
   %i4241 = sext i8 %i4240 to i64
   call fastcc void @transparent_crc(i64 %i4241, ptr @.str.2143, i32 signext undef)
-  %i4242 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2944, i64 0, i32 2, i32 0), align 1
+  %i4242 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2944, i64 0, i32 2, i32 0), align 1
   %i4243 = lshr i120 %i4242, 107
   %i4244 = trunc i120 %i4243 to i64
   call fastcc void @transparent_crc(i64 %i4244, ptr @.str.2144, i32 signext undef)
-  %i4245 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2944, i64 0, i32 2, i32 0), align 1
+  %i4245 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2944, i64 0, i32 2, i32 0), align 1
   %i4246 = lshr i120 %i4245, 78
   %i4247 = trunc i120 %i4246 to i64
   %i4248 = and i64 %i4247, 536870911
   call fastcc void @transparent_crc(i64 %i4248, ptr @.str.2145, i32 signext undef)
-  %i4249 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2944, i64 0, i32 2, i32 0), align 1
+  %i4249 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2944, i64 0, i32 2, i32 0), align 1
   %i4250 = shl i120 %i4249, 42
   %i4251 = ashr i120 %i4250, 104
   %i4252 = shl nsw i120 %i4251, 32
   %i4253 = trunc i120 %i4252 to i64
   %i4254 = ashr exact i64 %i4253, 32
   call fastcc void @transparent_crc(i64 %i4254, ptr @.str.2146, i32 signext undef)
-  %i4255 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2944, i64 0, i32 2, i32 0), align 1
+  %i4255 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2944, i64 0, i32 2, i32 0), align 1
   %i4256 = shl i120 %i4255, 58
   %i4257 = ashr i120 %i4256, 105
   %i4258 = shl nsw i120 %i4257, 32
   %i4259 = trunc i120 %i4258 to i64
   %i4260 = ashr exact i64 %i4259, 32
   call fastcc void @transparent_crc(i64 %i4260, ptr @.str.2147, i32 signext undef)
-  %i4261 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2944, i64 0, i32 2, i32 0), align 1
+  %i4261 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2944, i64 0, i32 2, i32 0), align 1
   %i4262 = lshr i120 %i4261, 41
   %i4263 = trunc i120 %i4262 to i64
   %i4264 = and i64 %i4263, 63
   call fastcc void @transparent_crc(i64 %i4264, ptr @.str.2148, i32 signext undef)
-  %i4265 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2944, i64 0, i32 2, i32 0), align 1
+  %i4265 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2944, i64 0, i32 2, i32 0), align 1
   %i4266 = lshr i120 %i4265, 19
   %i4267 = trunc i120 %i4266 to i64
   %i4268 = and i64 %i4267, 4194303
   call fastcc void @transparent_crc(i64 %i4268, ptr @.str.2149, i32 signext undef)
-  %i4269 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2944, i64 0, i32 2, i32 0), align 1
+  %i4269 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2944, i64 0, i32 2, i32 0), align 1
   %i4270 = shl i120 %i4269, 101
   %i4271 = ashr exact i120 %i4270, 69
   %i4272 = trunc i120 %i4271 to i64
   %i4273 = ashr exact i64 %i4272, 32
   call fastcc void @transparent_crc(i64 %i4273, ptr @.str.2150, i32 signext undef)
-  %i4274 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2944, i64 0, i32 3, i32 0), align 2, !tbaa !44
+  %i4274 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2944, i64 0, i32 3, i32 0), align 2, !tbaa !44
   %i4275 = zext i8 %i4274 to i64
   call fastcc void @transparent_crc(i64 %i4275, ptr @.str.2151, i32 signext undef)
-  %i4276 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2944, i64 0, i32 3, i32 1), align 1, !tbaa !45
+  %i4276 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2944, i64 0, i32 3, i32 1), align 1, !tbaa !45
   %i4277 = sext i8 %i4276 to i64
   call fastcc void @transparent_crc(i64 %i4277, ptr @.str.2152, i32 signext undef)
-  %i4278 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2944, i64 0, i32 3, i32 2), align 2, !tbaa !46
+  %i4278 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2944, i64 0, i32 3, i32 2), align 2, !tbaa !46
   %i4279 = sext i16 %i4278 to i64
   call fastcc void @transparent_crc(i64 %i4279, ptr @.str.2153, i32 signext undef)
-  %i4280 = load i64, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2944, i64 0, i32 3, i32 3), align 2, !tbaa !47
+  %i4280 = load i64, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2944, i64 0, i32 3, i32 3), align 2, !tbaa !47
   call fastcc void @transparent_crc(i64 %i4280, ptr @.str.2154, i32 signext undef)
   call fastcc void @transparent_crc(i64 0, ptr @.str.2159, i32 signext undef)
-  %i4281 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2944, i64 0, i32 4, i32 0), align 2
+  %i4281 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2944, i64 0, i32 4, i32 0), align 2
   %i4282 = lshr i128 %i4281, 28
   %i4283 = trunc i128 %i4282 to i64
   %i4284 = and i64 %i4283, 3
   call fastcc void @transparent_crc(i64 %i4284, ptr @.str.2160, i32 signext undef)
-  %i4285 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2944, i64 0, i32 4, i32 0), align 2
+  %i4285 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2944, i64 0, i32 4, i32 0), align 2
   %i4286 = shl i128 %i4285, 100
   %i4287 = ashr i128 %i4286, 107
   %i4288 = shl nsw i128 %i4287, 32
   %i4289 = trunc i128 %i4288 to i64
   %i4290 = ashr exact i64 %i4289, 32
   call fastcc void @transparent_crc(i64 %i4290, ptr @.str.2161, i32 signext undef)
-  %i4291 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2944, i64 0, i32 5, i32 0, i32 0), align 2
+  %i4291 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2944, i64 0, i32 5, i32 0, i32 0), align 2
   %i4292 = lshr i80 %i4291, 57
   %i4293 = trunc i80 %i4292 to i64
   call fastcc void @transparent_crc(i64 %i4293, ptr @.str.2162, i32 signext undef)
-  %i4294 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2944, i64 0, i32 5, i32 0, i32 0), align 2
+  %i4294 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2944, i64 0, i32 5, i32 0, i32 0), align 2
   %i4295 = shl i80 %i4294, 23
   %i4296 = ashr i80 %i4295, 64
   %i4297 = shl nsw i80 %i4296, 32
   %i4298 = trunc i80 %i4297 to i64
   %i4299 = ashr exact i64 %i4298, 32
   call fastcc void @transparent_crc(i64 %i4299, ptr @.str.2163, i32 signext undef)
-  %i4300 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2944, i64 0, i32 5, i32 0, i32 0), align 2
+  %i4300 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2944, i64 0, i32 5, i32 0, i32 0), align 2
   %i4301 = shl i80 %i4300, 39
   %i4302 = ashr i80 %i4301, 62
   %i4303 = shl nsw i80 %i4302, 32
   %i4304 = trunc i80 %i4303 to i64
   %i4305 = ashr exact i64 %i4304, 32
   call fastcc void @transparent_crc(i64 %i4305, ptr @.str.2164, i32 signext undef)
-  %i4306 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2944, i64 0, i32 5, i32 0, i32 0), align 2
+  %i4306 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2944, i64 0, i32 5, i32 0, i32 0), align 2
   %i4307 = shl i80 %i4306, 57
   %i4308 = ashr i80 %i4307, 58
   %i4309 = shl nsw i80 %i4308, 32
   %i4310 = trunc i80 %i4309 to i64
   %i4311 = ashr exact i64 %i4310, 32
   call fastcc void @transparent_crc(i64 %i4311, ptr @.str.2165, i32 signext undef)
-  %i4312 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2944, i64 0, i32 5, i32 1), align 2
+  %i4312 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2944, i64 0, i32 5, i32 1), align 2
   %i4313 = lshr i80 %i4312, 49
   %i4314 = trunc i80 %i4313 to i64
   call fastcc void @transparent_crc(i64 %i4314, ptr @.str.2166, i32 signext undef)
-  %i4315 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2944, i64 0, i32 5, i32 1), align 2
+  %i4315 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2944, i64 0, i32 5, i32 1), align 2
   %i4316 = lshr i80 %i4315, 24
   %i4317 = trunc i80 %i4316 to i64
   %i4318 = and i64 %i4317, 33554431
   call fastcc void @transparent_crc(i64 %i4318, ptr @.str.2167, i32 signext undef)
-  %i4319 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2944, i64 0, i32 5, i32 1), align 2
+  %i4319 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2944, i64 0, i32 5, i32 1), align 2
   %i4320 = shl i80 %i4319, 56
   %i4321 = ashr i80 %i4320, 68
   %i4322 = shl nsw i80 %i4321, 32
   %i4323 = trunc i80 %i4322 to i64
   %i4324 = ashr exact i64 %i4323, 32
   call fastcc void @transparent_crc(i64 %i4324, ptr @.str.2168, i32 signext undef)
-  %i4325 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2944, i64 0, i32 5, i32 1), align 2
+  %i4325 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2944, i64 0, i32 5, i32 1), align 2
   %i4326 = lshr i80 %i4325, 11
   %i4327 = trunc i80 %i4326 to i64
   %i4328 = and i64 %i4327, 1
   call fastcc void @transparent_crc(i64 %i4328, ptr @.str.2169, i32 signext undef)
-  %i4329 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2944, i64 0, i32 5, i32 1), align 2
+  %i4329 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2944, i64 0, i32 5, i32 1), align 2
   %i4330 = shl i80 %i4329, 69
   %i4331 = ashr i80 %i4330, 72
   %i4332 = shl nsw i80 %i4331, 32
   %i4333 = trunc i80 %i4332 to i64
   %i4334 = ashr exact i64 %i4333, 32
   call fastcc void @transparent_crc(i64 %i4334, ptr @.str.2170, i32 signext undef)
-  %i4335 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2944, i64 0, i32 6), align 2, !tbaa !49
+  %i4335 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2944, i64 0, i32 6), align 2, !tbaa !49
   %i4336 = sext i16 %i4335 to i64
   call fastcc void @transparent_crc(i64 %i4336, ptr @.str.2171, i32 signext undef)
-  %i4337 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2944, i64 0, i32 7), align 2, !tbaa !50
+  %i4337 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2944, i64 0, i32 7), align 2, !tbaa !50
   %i4338 = zext i16 %i4337 to i64
   call fastcc void @transparent_crc(i64 %i4338, ptr @.str.2172, i32 signext undef)
-  %i4339 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2945, i64 0, i32 0), align 2, !tbaa !23
+  %i4339 = load i16, ptr @g_2945, align 2, !tbaa !23
   %i4340 = sext i16 %i4339 to i64
   call fastcc void @transparent_crc(i64 %i4340, ptr @.str.2173, i32 signext undef)
-  %i4341 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2945, i64 0, i32 1), align 2, !tbaa !51
+  %i4341 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2945, i64 0, i32 1), align 2, !tbaa !51
   %i4342 = sext i8 %i4341 to i64
   call fastcc void @transparent_crc(i64 %i4342, ptr @.str.2174, i32 signext undef)
-  %i4343 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2945, i64 0, i32 2, i32 0), align 1
+  %i4343 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2945, i64 0, i32 2, i32 0), align 1
   %i4344 = lshr i120 %i4343, 107
   %i4345 = trunc i120 %i4344 to i64
   call fastcc void @transparent_crc(i64 %i4345, ptr @.str.2175, i32 signext undef)
-  %i4346 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2945, i64 0, i32 2, i32 0), align 1
+  %i4346 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2945, i64 0, i32 2, i32 0), align 1
   %i4347 = lshr i120 %i4346, 78
   %i4348 = trunc i120 %i4347 to i64
   %i4349 = and i64 %i4348, 536870911
   call fastcc void @transparent_crc(i64 %i4349, ptr @.str.2176, i32 signext undef)
-  %i4350 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2945, i64 0, i32 2, i32 0), align 1
+  %i4350 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2945, i64 0, i32 2, i32 0), align 1
   %i4351 = shl i120 %i4350, 42
   %i4352 = ashr i120 %i4351, 104
   %i4353 = shl nsw i120 %i4352, 32
   %i4354 = trunc i120 %i4353 to i64
   %i4355 = ashr exact i64 %i4354, 32
   call fastcc void @transparent_crc(i64 %i4355, ptr @.str.2177, i32 signext undef)
-  %i4356 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2945, i64 0, i32 2, i32 0), align 1
+  %i4356 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2945, i64 0, i32 2, i32 0), align 1
   %i4357 = shl i120 %i4356, 58
   %i4358 = ashr i120 %i4357, 105
   %i4359 = shl nsw i120 %i4358, 32
   %i4360 = trunc i120 %i4359 to i64
   %i4361 = ashr exact i64 %i4360, 32
   call fastcc void @transparent_crc(i64 %i4361, ptr @.str.2178, i32 signext undef)
-  %i4362 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2945, i64 0, i32 2, i32 0), align 1
+  %i4362 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2945, i64 0, i32 2, i32 0), align 1
   %i4363 = lshr i120 %i4362, 41
   %i4364 = trunc i120 %i4363 to i64
   %i4365 = and i64 %i4364, 63
   call fastcc void @transparent_crc(i64 %i4365, ptr @.str.2179, i32 signext undef)
-  %i4366 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2945, i64 0, i32 2, i32 0), align 1
+  %i4366 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2945, i64 0, i32 2, i32 0), align 1
   %i4367 = lshr i120 %i4366, 19
   %i4368 = trunc i120 %i4367 to i64
   %i4369 = and i64 %i4368, 4194303
   call fastcc void @transparent_crc(i64 %i4369, ptr @.str.2180, i32 signext undef)
-  %i4370 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2945, i64 0, i32 2, i32 0), align 1
+  %i4370 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2945, i64 0, i32 2, i32 0), align 1
   %i4371 = shl i120 %i4370, 101
   %i4372 = ashr exact i120 %i4371, 69
   %i4373 = trunc i120 %i4372 to i64
   %i4374 = ashr exact i64 %i4373, 32
   call fastcc void @transparent_crc(i64 %i4374, ptr @.str.2181, i32 signext undef)
-  %i4375 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2945, i64 0, i32 3, i32 0), align 2, !tbaa !44
+  %i4375 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2945, i64 0, i32 3, i32 0), align 2, !tbaa !44
   %i4376 = zext i8 %i4375 to i64
   call fastcc void @transparent_crc(i64 %i4376, ptr @.str.2182, i32 signext undef)
-  %i4377 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2945, i64 0, i32 3, i32 1), align 1, !tbaa !45
+  %i4377 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2945, i64 0, i32 3, i32 1), align 1, !tbaa !45
   %i4378 = sext i8 %i4377 to i64
   call fastcc void @transparent_crc(i64 %i4378, ptr @.str.2183, i32 signext undef)
-  %i4379 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2945, i64 0, i32 3, i32 2), align 2, !tbaa !46
+  %i4379 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2945, i64 0, i32 3, i32 2), align 2, !tbaa !46
   %i4380 = sext i16 %i4379 to i64
   call fastcc void @transparent_crc(i64 %i4380, ptr @.str.2184, i32 signext undef)
-  %i4381 = load i64, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2945, i64 0, i32 3, i32 3), align 2, !tbaa !47
+  %i4381 = load i64, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2945, i64 0, i32 3, i32 3), align 2, !tbaa !47
   call fastcc void @transparent_crc(i64 %i4381, ptr @.str.2185, i32 signext undef)
-  %i4382 = load i32, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2945, i64 0, i32 3, i32 4), align 2, !tbaa !48
+  %i4382 = load i32, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2945, i64 0, i32 3, i32 4), align 2, !tbaa !48
   %i4383 = sext i32 %i4382 to i64
   call fastcc void @transparent_crc(i64 %i4383, ptr @.str.2186, i32 signext undef)
-  %i4384 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2945, i64 0, i32 4, i32 0), align 2
+  %i4384 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2945, i64 0, i32 4, i32 0), align 2
   %i4385 = ashr i128 %i4384, 99
   %i4386 = shl nsw i128 %i4385, 32
   %i4387 = trunc i128 %i4386 to i64
   %i4388 = ashr exact i64 %i4387, 32
   call fastcc void @transparent_crc(i64 %i4388, ptr @.str.2187, i32 signext undef)
-  %i4389 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2945, i64 0, i32 4, i32 0), align 2
+  %i4389 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2945, i64 0, i32 4, i32 0), align 2
   %i4390 = shl i128 %i4389, 29
   %i4391 = ashr i128 %i4390, 97
   %i4392 = shl nsw i128 %i4391, 32
   %i4393 = trunc i128 %i4392 to i64
   %i4394 = ashr exact i64 %i4393, 32
   call fastcc void @transparent_crc(i64 %i4394, ptr @.str.2188, i32 signext undef)
-  %i4395 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2945, i64 0, i32 4, i32 0), align 2
+  %i4395 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2945, i64 0, i32 4, i32 0), align 2
   %i4396 = shl i128 %i4395, 60
   %i4397 = ashr i128 %i4396, 108
   %i4398 = shl nsw i128 %i4397, 32
   %i4399 = trunc i128 %i4398 to i64
   %i4400 = ashr exact i64 %i4399, 32
   call fastcc void @transparent_crc(i64 %i4400, ptr @.str.2189, i32 signext undef)
-  %i4401 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2945, i64 0, i32 4, i32 0), align 2
+  %i4401 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2945, i64 0, i32 4, i32 0), align 2
   %i4402 = shl i128 %i4401, 80
   %i4403 = ashr i128 %i4402, 110
   %i4404 = shl nsw i128 %i4403, 32
   %i4405 = trunc i128 %i4404 to i64
   %i4406 = ashr exact i64 %i4405, 32
   call fastcc void @transparent_crc(i64 %i4406, ptr @.str.2190, i32 signext undef)
-  %i4407 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2945, i64 0, i32 4, i32 0), align 2
+  %i4407 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2945, i64 0, i32 4, i32 0), align 2
   %i4408 = lshr i128 %i4407, 28
   %i4409 = trunc i128 %i4408 to i64
   %i4410 = and i64 %i4409, 3
   call fastcc void @transparent_crc(i64 %i4410, ptr @.str.2191, i32 signext undef)
-  %i4411 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2945, i64 0, i32 4, i32 0), align 2
+  %i4411 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2945, i64 0, i32 4, i32 0), align 2
   %i4412 = shl i128 %i4411, 100
   %i4413 = ashr i128 %i4412, 107
   %i4414 = shl nsw i128 %i4413, 32
   %i4415 = trunc i128 %i4414 to i64
   %i4416 = ashr exact i64 %i4415, 32
   call fastcc void @transparent_crc(i64 %i4416, ptr @.str.2192, i32 signext undef)
-  %i4417 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2945, i64 0, i32 5, i32 0, i32 0), align 2
+  %i4417 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2945, i64 0, i32 5, i32 0, i32 0), align 2
   %i4418 = lshr i80 %i4417, 57
   %i4419 = trunc i80 %i4418 to i64
   call fastcc void @transparent_crc(i64 %i4419, ptr @.str.2193, i32 signext undef)
-  %i4420 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2945, i64 0, i32 5, i32 0, i32 0), align 2
+  %i4420 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2945, i64 0, i32 5, i32 0, i32 0), align 2
   %i4421 = shl i80 %i4420, 23
   %i4422 = ashr i80 %i4421, 64
   %i4423 = shl nsw i80 %i4422, 32
   %i4424 = trunc i80 %i4423 to i64
   %i4425 = ashr exact i64 %i4424, 32
   call fastcc void @transparent_crc(i64 %i4425, ptr @.str.2194, i32 signext undef)
-  %i4426 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2945, i64 0, i32 5, i32 0, i32 0), align 2
+  %i4426 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2945, i64 0, i32 5, i32 0, i32 0), align 2
   %i4427 = shl i80 %i4426, 39
   %i4428 = ashr i80 %i4427, 62
   %i4429 = shl nsw i80 %i4428, 32
   %i4430 = trunc i80 %i4429 to i64
   %i4431 = ashr exact i64 %i4430, 32
   call fastcc void @transparent_crc(i64 %i4431, ptr @.str.2195, i32 signext undef)
-  %i4432 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2945, i64 0, i32 5, i32 0, i32 0), align 2
+  %i4432 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2945, i64 0, i32 5, i32 0, i32 0), align 2
   %i4433 = shl i80 %i4432, 57
   %i4434 = ashr i80 %i4433, 58
   %i4435 = shl nsw i80 %i4434, 32
   %i4436 = trunc i80 %i4435 to i64
   %i4437 = ashr exact i64 %i4436, 32
   call fastcc void @transparent_crc(i64 %i4437, ptr @.str.2196, i32 signext undef)
-  %i4438 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2945, i64 0, i32 5, i32 1), align 2
+  %i4438 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2945, i64 0, i32 5, i32 1), align 2
   %i4439 = lshr i80 %i4438, 49
   %i4440 = trunc i80 %i4439 to i64
   call fastcc void @transparent_crc(i64 %i4440, ptr @.str.2197, i32 signext undef)
-  %i4441 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2945, i64 0, i32 5, i32 1), align 2
+  %i4441 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2945, i64 0, i32 5, i32 1), align 2
   %i4442 = lshr i80 %i4441, 24
   %i4443 = trunc i80 %i4442 to i64
   %i4444 = and i64 %i4443, 33554431
   call fastcc void @transparent_crc(i64 %i4444, ptr @.str.2198, i32 signext undef)
-  %i4445 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2945, i64 0, i32 5, i32 1), align 2
+  %i4445 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2945, i64 0, i32 5, i32 1), align 2
   %i4446 = shl i80 %i4445, 56
   %i4447 = ashr i80 %i4446, 68
   %i4448 = shl nsw i80 %i4447, 32
   %i4449 = trunc i80 %i4448 to i64
   %i4450 = ashr exact i64 %i4449, 32
   call fastcc void @transparent_crc(i64 %i4450, ptr @.str.2199, i32 signext undef)
-  %i4451 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2945, i64 0, i32 5, i32 1), align 2
+  %i4451 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2945, i64 0, i32 5, i32 1), align 2
   %i4452 = lshr i80 %i4451, 11
   %i4453 = trunc i80 %i4452 to i64
   %i4454 = and i64 %i4453, 1
   call fastcc void @transparent_crc(i64 %i4454, ptr @.str.2200, i32 signext undef)
-  %i4455 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2945, i64 0, i32 5, i32 1), align 2
+  %i4455 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2945, i64 0, i32 5, i32 1), align 2
   %i4456 = shl i80 %i4455, 69
   %i4457 = ashr i80 %i4456, 72
   %i4458 = shl nsw i80 %i4457, 32
   %i4459 = trunc i80 %i4458 to i64
   %i4460 = ashr exact i64 %i4459, 32
   call fastcc void @transparent_crc(i64 %i4460, ptr @.str.2201, i32 signext undef)
-  %i4461 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2945, i64 0, i32 6), align 2, !tbaa !49
+  %i4461 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2945, i64 0, i32 6), align 2, !tbaa !49
   %i4462 = sext i16 %i4461 to i64
   call fastcc void @transparent_crc(i64 %i4462, ptr @.str.2202, i32 signext undef)
-  %i4463 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2945, i64 0, i32 7), align 2, !tbaa !50
+  %i4463 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2945, i64 0, i32 7), align 2, !tbaa !50
   %i4464 = zext i16 %i4463 to i64
   call fastcc void @transparent_crc(i64 %i4464, ptr @.str.2203, i32 signext undef)
   %i4465 = load i16, ptr undef, align 2, !tbaa !23
@@ -9144,7 +9144,7 @@ bb25:                                             ; preds = %bb15
   %i4508 = load i32, ptr undef, align 2, !tbaa !48
   %i4509 = sext i32 %i4508 to i64
   call fastcc void @transparent_crc(i64 %i4509, ptr @.str.2217, i32 signext undef)
-  %i4510 = getelementptr inbounds [3 x [9 x [9 x %5]]], ptr bitcast (<{ <{ <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 
}, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, 
i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, 
i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { 
i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, 
i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }> }>, <{ <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { 
i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, 
i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, 
i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, 
i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 
}>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, 
i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }> }>, <{ <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, 
i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, 
i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, 
i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, 
i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { 
i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 
}, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }> }> }>* @g_2946 to ptr), i64 0, i64 0, i64 0, i64 0, i32 4, i32 0
+  %i4510 = getelementptr inbounds [3 x [9 x [9 x %5]]], ptr @g_2946, i64 0, i64 0, i64 0, i64 0, i32 4, i32 0
   %i4511 = load volatile i128, ptr %i4510, align 2
   %i4512 = ashr i128 %i4511, 99
   %i4513 = shl nsw i128 %i4512, 32
@@ -9205,291 +9205,291 @@ bb25:                                             ; preds = %bb15
   call fastcc void @transparent_crc(i64 %i4557, ptr @.str.2231, i32 signext undef)
   %i4558 = load volatile i80, ptr undef, align 2
   call fastcc void @transparent_crc(i64 undef, ptr @.str.2252, i32 signext undef)
-  %i4559 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2947, i64 0, i32 4, i32 0), align 2
+  %i4559 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2947, i64 0, i32 4, i32 0), align 2
   %i4560 = lshr i128 %i4559, 28
   %i4561 = trunc i128 %i4560 to i64
   %i4562 = and i64 %i4561, 3
   call fastcc void @transparent_crc(i64 %i4562, ptr @.str.2253, i32 signext undef)
-  %i4563 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2947, i64 0, i32 4, i32 0), align 2
+  %i4563 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2947, i64 0, i32 4, i32 0), align 2
   %i4564 = shl i128 %i4563, 100
   %i4565 = ashr i128 %i4564, 107
   %i4566 = shl nsw i128 %i4565, 32
   %i4567 = trunc i128 %i4566 to i64
   %i4568 = ashr exact i64 %i4567, 32
   call fastcc void @transparent_crc(i64 %i4568, ptr @.str.2254, i32 signext undef)
-  %i4569 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2947, i64 0, i32 5, i32 0, i32 0), align 2
+  %i4569 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2947, i64 0, i32 5, i32 0, i32 0), align 2
   %i4570 = lshr i80 %i4569, 57
   %i4571 = trunc i80 %i4570 to i64
   call fastcc void @transparent_crc(i64 %i4571, ptr @.str.2255, i32 signext undef)
-  %i4572 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2947, i64 0, i32 5, i32 0, i32 0), align 2
+  %i4572 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2947, i64 0, i32 5, i32 0, i32 0), align 2
   %i4573 = shl i80 %i4572, 23
   %i4574 = ashr i80 %i4573, 64
   %i4575 = shl nsw i80 %i4574, 32
   %i4576 = trunc i80 %i4575 to i64
   %i4577 = ashr exact i64 %i4576, 32
   call fastcc void @transparent_crc(i64 %i4577, ptr @.str.2256, i32 signext undef)
-  %i4578 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2947, i64 0, i32 5, i32 0, i32 0), align 2
+  %i4578 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2947, i64 0, i32 5, i32 0, i32 0), align 2
   %i4579 = shl i80 %i4578, 39
   %i4580 = ashr i80 %i4579, 62
   %i4581 = shl nsw i80 %i4580, 32
   %i4582 = trunc i80 %i4581 to i64
   %i4583 = ashr exact i64 %i4582, 32
   call fastcc void @transparent_crc(i64 %i4583, ptr @.str.2257, i32 signext undef)
-  %i4584 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2947, i64 0, i32 5, i32 0, i32 0), align 2
+  %i4584 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2947, i64 0, i32 5, i32 0, i32 0), align 2
   %i4585 = shl i80 %i4584, 57
   %i4586 = ashr i80 %i4585, 58
   %i4587 = shl nsw i80 %i4586, 32
   %i4588 = trunc i80 %i4587 to i64
   %i4589 = ashr exact i64 %i4588, 32
   call fastcc void @transparent_crc(i64 %i4589, ptr @.str.2258, i32 signext undef)
-  %i4590 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2947, i64 0, i32 5, i32 1), align 2
+  %i4590 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2947, i64 0, i32 5, i32 1), align 2
   %i4591 = lshr i80 %i4590, 49
   %i4592 = trunc i80 %i4591 to i64
   call fastcc void @transparent_crc(i64 %i4592, ptr @.str.2259, i32 signext undef)
-  %i4593 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2947, i64 0, i32 5, i32 1), align 2
+  %i4593 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2947, i64 0, i32 5, i32 1), align 2
   %i4594 = lshr i80 %i4593, 24
   %i4595 = trunc i80 %i4594 to i64
   %i4596 = and i64 %i4595, 33554431
   call fastcc void @transparent_crc(i64 %i4596, ptr @.str.2260, i32 signext undef)
   call fastcc void @transparent_crc(i64 0, ptr @.str.2268, i32 signext undef)
-  %i4597 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2948, i64 0, i32 2, i32 0), align 1
+  %i4597 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2948, i64 0, i32 2, i32 0), align 1
   %i4598 = lshr i120 %i4597, 78
   %i4599 = trunc i120 %i4598 to i64
   %i4600 = and i64 %i4599, 536870911
   call fastcc void @transparent_crc(i64 %i4600, ptr @.str.2269, i32 signext undef)
-  %i4601 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2948, i64 0, i32 2, i32 0), align 1
+  %i4601 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2948, i64 0, i32 2, i32 0), align 1
   %i4602 = shl i120 %i4601, 42
   %i4603 = ashr i120 %i4602, 104
   %i4604 = shl nsw i120 %i4603, 32
   %i4605 = trunc i120 %i4604 to i64
   %i4606 = ashr exact i64 %i4605, 32
   call fastcc void @transparent_crc(i64 %i4606, ptr @.str.2270, i32 signext undef)
-  %i4607 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2948, i64 0, i32 2, i32 0), align 1
+  %i4607 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2948, i64 0, i32 2, i32 0), align 1
   %i4608 = shl i120 %i4607, 58
   %i4609 = ashr i120 %i4608, 105
   %i4610 = shl nsw i120 %i4609, 32
   %i4611 = trunc i120 %i4610 to i64
   %i4612 = ashr exact i64 %i4611, 32
   call fastcc void @transparent_crc(i64 %i4612, ptr @.str.2271, i32 signext undef)
-  %i4613 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2948, i64 0, i32 2, i32 0), align 1
+  %i4613 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2948, i64 0, i32 2, i32 0), align 1
   %i4614 = lshr i120 %i4613, 41
   %i4615 = trunc i120 %i4614 to i64
   %i4616 = and i64 %i4615, 63
   call fastcc void @transparent_crc(i64 %i4616, ptr @.str.2272, i32 signext undef)
-  %i4617 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2948, i64 0, i32 2, i32 0), align 1
+  %i4617 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2948, i64 0, i32 2, i32 0), align 1
   %i4618 = lshr i120 %i4617, 19
   %i4619 = trunc i120 %i4618 to i64
   %i4620 = and i64 %i4619, 4194303
   call fastcc void @transparent_crc(i64 %i4620, ptr @.str.2273, i32 signext undef)
-  %i4621 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2948, i64 0, i32 2, i32 0), align 1
+  %i4621 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2948, i64 0, i32 2, i32 0), align 1
   %i4622 = shl i120 %i4621, 101
   %i4623 = ashr exact i120 %i4622, 69
   %i4624 = trunc i120 %i4623 to i64
   %i4625 = ashr exact i64 %i4624, 32
   call fastcc void @transparent_crc(i64 %i4625, ptr @.str.2274, i32 signext undef)
-  %i4626 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2948, i64 0, i32 3, i32 0), align 2, !tbaa !44
+  %i4626 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2948, i64 0, i32 3, i32 0), align 2, !tbaa !44
   %i4627 = zext i8 %i4626 to i64
   call fastcc void @transparent_crc(i64 %i4627, ptr @.str.2275, i32 signext undef)
-  %i4628 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2948, i64 0, i32 3, i32 1), align 1, !tbaa !45
+  %i4628 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2948, i64 0, i32 3, i32 1), align 1, !tbaa !45
   %i4629 = sext i8 %i4628 to i64
   call fastcc void @transparent_crc(i64 %i4629, ptr @.str.2276, i32 signext undef)
-  %i4630 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2948, i64 0, i32 3, i32 2), align 2, !tbaa !46
+  %i4630 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2948, i64 0, i32 3, i32 2), align 2, !tbaa !46
   %i4631 = sext i16 %i4630 to i64
   call fastcc void @transparent_crc(i64 %i4631, ptr @.str.2277, i32 signext undef)
-  %i4632 = load i64, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2948, i64 0, i32 3, i32 3), align 2, !tbaa !47
+  %i4632 = load i64, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2948, i64 0, i32 3, i32 3), align 2, !tbaa !47
   call fastcc void @transparent_crc(i64 %i4632, ptr @.str.2278, i32 signext undef)
-  %i4633 = load i32, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2948, i64 0, i32 3, i32 4), align 2, !tbaa !48
+  %i4633 = load i32, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2948, i64 0, i32 3, i32 4), align 2, !tbaa !48
   %i4634 = sext i32 %i4633 to i64
   call fastcc void @transparent_crc(i64 %i4634, ptr @.str.2279, i32 signext undef)
-  %i4635 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2948, i64 0, i32 4, i32 0), align 2
+  %i4635 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2948, i64 0, i32 4, i32 0), align 2
   %i4636 = ashr i128 %i4635, 99
   %i4637 = shl nsw i128 %i4636, 32
   %i4638 = trunc i128 %i4637 to i64
   %i4639 = ashr exact i64 %i4638, 32
   call fastcc void @transparent_crc(i64 %i4639, ptr @.str.2280, i32 signext undef)
-  %i4640 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2948, i64 0, i32 4, i32 0), align 2
+  %i4640 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2948, i64 0, i32 4, i32 0), align 2
   %i4641 = shl i128 %i4640, 29
   %i4642 = ashr i128 %i4641, 97
   %i4643 = shl nsw i128 %i4642, 32
   %i4644 = trunc i128 %i4643 to i64
   %i4645 = ashr exact i64 %i4644, 32
   call fastcc void @transparent_crc(i64 %i4645, ptr @.str.2281, i32 signext undef)
-  %i4646 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2948, i64 0, i32 4, i32 0), align 2
+  %i4646 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2948, i64 0, i32 4, i32 0), align 2
   %i4647 = shl i128 %i4646, 60
   %i4648 = ashr i128 %i4647, 108
   %i4649 = shl nsw i128 %i4648, 32
   %i4650 = trunc i128 %i4649 to i64
   %i4651 = ashr exact i64 %i4650, 32
   call fastcc void @transparent_crc(i64 %i4651, ptr @.str.2282, i32 signext undef)
-  %i4652 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2948, i64 0, i32 4, i32 0), align 2
+  %i4652 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2948, i64 0, i32 4, i32 0), align 2
   %i4653 = shl i128 %i4652, 80
   %i4654 = ashr i128 %i4653, 110
   %i4655 = shl nsw i128 %i4654, 32
   %i4656 = trunc i128 %i4655 to i64
   %i4657 = ashr exact i64 %i4656, 32
   call fastcc void @transparent_crc(i64 %i4657, ptr @.str.2283, i32 signext undef)
-  %i4658 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2948, i64 0, i32 4, i32 0), align 2
+  %i4658 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2948, i64 0, i32 4, i32 0), align 2
   %i4659 = lshr i128 %i4658, 28
   %i4660 = trunc i128 %i4659 to i64
   %i4661 = and i64 %i4660, 3
   call fastcc void @transparent_crc(i64 %i4661, ptr @.str.2284, i32 signext undef)
-  %i4662 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2948, i64 0, i32 4, i32 0), align 2
+  %i4662 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2948, i64 0, i32 4, i32 0), align 2
   %i4663 = shl i128 %i4662, 100
   %i4664 = ashr i128 %i4663, 107
   %i4665 = shl nsw i128 %i4664, 32
   %i4666 = trunc i128 %i4665 to i64
   %i4667 = ashr exact i64 %i4666, 32
   call fastcc void @transparent_crc(i64 %i4667, ptr @.str.2285, i32 signext undef)
-  %i4668 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2948, i64 0, i32 5, i32 0, i32 0), align 2
+  %i4668 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2948, i64 0, i32 5, i32 0, i32 0), align 2
   %i4669 = lshr i80 %i4668, 57
   %i4670 = trunc i80 %i4669 to i64
   call fastcc void @transparent_crc(i64 %i4670, ptr @.str.2286, i32 signext undef)
-  %i4671 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2948, i64 0, i32 5, i32 0, i32 0), align 2
+  %i4671 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2948, i64 0, i32 5, i32 0, i32 0), align 2
   call fastcc void @transparent_crc(i64 undef, ptr @.str.2299, i32 signext undef)
-  %i4672 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2949, i64 0, i32 2, i32 0), align 1
+  %i4672 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2949, i64 0, i32 2, i32 0), align 1
   call fastcc void @transparent_crc(i64 0, ptr @.str.2301, i32 signext undef)
-  %i4673 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2949, i64 0, i32 2, i32 0), align 1
+  %i4673 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2949, i64 0, i32 2, i32 0), align 1
   %i4674 = shl i120 %i4673, 58
   %i4675 = ashr i120 %i4674, 105
   %i4676 = shl nsw i120 %i4675, 32
   %i4677 = trunc i120 %i4676 to i64
   %i4678 = ashr exact i64 %i4677, 32
   call fastcc void @transparent_crc(i64 %i4678, ptr @.str.2302, i32 signext undef)
-  %i4679 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2949, i64 0, i32 2, i32 0), align 1
+  %i4679 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2949, i64 0, i32 2, i32 0), align 1
   %i4680 = lshr i120 %i4679, 41
   %i4681 = trunc i120 %i4680 to i64
   %i4682 = and i64 %i4681, 63
   call fastcc void @transparent_crc(i64 %i4682, ptr @.str.2303, i32 signext undef)
-  %i4683 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2949, i64 0, i32 2, i32 0), align 1
+  %i4683 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2949, i64 0, i32 2, i32 0), align 1
   %i4684 = lshr i120 %i4683, 19
   %i4685 = trunc i120 %i4684 to i64
   %i4686 = and i64 %i4685, 4194303
   call fastcc void @transparent_crc(i64 %i4686, ptr @.str.2304, i32 signext undef)
-  %i4687 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2949, i64 0, i32 2, i32 0), align 1
+  %i4687 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2949, i64 0, i32 2, i32 0), align 1
   call fastcc void @transparent_crc(i64 0, ptr @.str.2319, i32 signext undef)
-  %i4688 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2949, i64 0, i32 5, i32 0, i32 0), align 2
+  %i4688 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2949, i64 0, i32 5, i32 0, i32 0), align 2
   %i4689 = shl i80 %i4688, 57
   %i4690 = ashr i80 %i4689, 58
   %i4691 = shl nsw i80 %i4690, 32
   %i4692 = trunc i80 %i4691 to i64
   %i4693 = ashr exact i64 %i4692, 32
   call fastcc void @transparent_crc(i64 %i4693, ptr @.str.2320, i32 signext undef)
-  %i4694 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2949, i64 0, i32 5, i32 1), align 2
+  %i4694 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2949, i64 0, i32 5, i32 1), align 2
   %i4695 = lshr i80 %i4694, 49
   %i4696 = trunc i80 %i4695 to i64
   call fastcc void @transparent_crc(i64 %i4696, ptr @.str.2321, i32 signext undef)
-  %i4697 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2949, i64 0, i32 5, i32 1), align 2
+  %i4697 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2949, i64 0, i32 5, i32 1), align 2
   %i4698 = lshr i80 %i4697, 24
   %i4699 = trunc i80 %i4698 to i64
   %i4700 = and i64 %i4699, 33554431
   call fastcc void @transparent_crc(i64 %i4700, ptr @.str.2322, i32 signext undef)
-  %i4701 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2949, i64 0, i32 5, i32 1), align 2
+  %i4701 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2949, i64 0, i32 5, i32 1), align 2
   %i4702 = shl i80 %i4701, 56
   %i4703 = ashr i80 %i4702, 68
   %i4704 = shl nsw i80 %i4703, 32
   %i4705 = trunc i80 %i4704 to i64
   %i4706 = ashr exact i64 %i4705, 32
   call fastcc void @transparent_crc(i64 %i4706, ptr @.str.2323, i32 signext undef)
-  %i4707 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2949, i64 0, i32 5, i32 1), align 2
+  %i4707 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2949, i64 0, i32 5, i32 1), align 2
   %i4708 = lshr i80 %i4707, 11
   %i4709 = trunc i80 %i4708 to i64
   %i4710 = and i64 %i4709, 1
   call fastcc void @transparent_crc(i64 %i4710, ptr @.str.2324, i32 signext undef)
-  %i4711 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2949, i64 0, i32 5, i32 1), align 2
+  %i4711 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2949, i64 0, i32 5, i32 1), align 2
   %i4712 = shl i80 %i4711, 69
   %i4713 = ashr i80 %i4712, 72
   %i4714 = shl nsw i80 %i4713, 32
   %i4715 = trunc i80 %i4714 to i64
   %i4716 = ashr exact i64 %i4715, 32
   call fastcc void @transparent_crc(i64 %i4716, ptr @.str.2325, i32 signext undef)
-  %i4717 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2949, i64 0, i32 6), align 2, !tbaa !49
+  %i4717 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2949, i64 0, i32 6), align 2, !tbaa !49
   %i4718 = sext i16 %i4717 to i64
   call fastcc void @transparent_crc(i64 %i4718, ptr @.str.2326, i32 signext undef)
-  %i4719 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2949, i64 0, i32 7), align 2, !tbaa !50
+  %i4719 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2949, i64 0, i32 7), align 2, !tbaa !50
   %i4720 = zext i16 %i4719 to i64
   call fastcc void @transparent_crc(i64 %i4720, ptr @.str.2327, i32 signext undef)
-  %i4721 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2950, i64 0, i32 0), align 2, !tbaa !23
+  %i4721 = load i16, ptr @g_2950, align 2, !tbaa !23
   %i4722 = sext i16 %i4721 to i64
   call fastcc void @transparent_crc(i64 %i4722, ptr @.str.2328, i32 signext undef)
-  %i4723 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2950, i64 0, i32 1), align 2, !tbaa !51
+  %i4723 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2950, i64 0, i32 1), align 2, !tbaa !51
   %i4724 = sext i8 %i4723 to i64
   call fastcc void @transparent_crc(i64 %i4724, ptr @.str.2329, i32 signext undef)
-  %i4725 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2950, i64 0, i32 2, i32 0), align 1
+  %i4725 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2950, i64 0, i32 2, i32 0), align 1
   %i4726 = lshr i120 %i4725, 107
   %i4727 = trunc i120 %i4726 to i64
   call fastcc void @transparent_crc(i64 %i4727, ptr @.str.2330, i32 signext undef)
-  %i4728 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2950, i64 0, i32 2, i32 0), align 1
+  %i4728 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2950, i64 0, i32 2, i32 0), align 1
   %i4729 = lshr i120 %i4728, 78
   %i4730 = trunc i120 %i4729 to i64
   %i4731 = and i64 %i4730, 536870911
   call fastcc void @transparent_crc(i64 %i4731, ptr @.str.2331, i32 signext undef)
-  %i4732 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2950, i64 0, i32 2, i32 0), align 1
+  %i4732 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2950, i64 0, i32 2, i32 0), align 1
   %i4733 = shl i120 %i4732, 42
   %i4734 = ashr i120 %i4733, 104
   %i4735 = shl nsw i120 %i4734, 32
   %i4736 = trunc i120 %i4735 to i64
   %i4737 = ashr exact i64 %i4736, 32
   call fastcc void @transparent_crc(i64 %i4737, ptr @.str.2332, i32 signext undef)
-  %i4738 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2950, i64 0, i32 2, i32 0), align 1
+  %i4738 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2950, i64 0, i32 2, i32 0), align 1
   %i4739 = shl i120 %i4738, 58
   %i4740 = ashr i120 %i4739, 105
   %i4741 = shl nsw i120 %i4740, 32
   %i4742 = trunc i120 %i4741 to i64
   %i4743 = ashr exact i64 %i4742, 32
   call fastcc void @transparent_crc(i64 %i4743, ptr @.str.2333, i32 signext undef)
-  %i4744 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2950, i64 0, i32 2, i32 0), align 1
+  %i4744 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2950, i64 0, i32 2, i32 0), align 1
   %i4745 = lshr i120 %i4744, 41
   %i4746 = trunc i120 %i4745 to i64
   %i4747 = and i64 %i4746, 63
   call fastcc void @transparent_crc(i64 %i4747, ptr @.str.2334, i32 signext undef)
-  %i4748 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2950, i64 0, i32 2, i32 0), align 1
+  %i4748 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2950, i64 0, i32 2, i32 0), align 1
   %i4749 = lshr i120 %i4748, 19
   %i4750 = trunc i120 %i4749 to i64
   %i4751 = and i64 %i4750, 4194303
   call fastcc void @transparent_crc(i64 %i4751, ptr @.str.2335, i32 signext undef)
-  %i4752 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2950, i64 0, i32 2, i32 0), align 1
+  %i4752 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2950, i64 0, i32 2, i32 0), align 1
   %i4753 = shl i120 %i4752, 101
   %i4754 = ashr exact i120 %i4753, 69
   %i4755 = trunc i120 %i4754 to i64
   %i4756 = ashr exact i64 %i4755, 32
   call fastcc void @transparent_crc(i64 %i4756, ptr @.str.2336, i32 signext undef)
-  %i4757 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2950, i64 0, i32 3, i32 0), align 2, !tbaa !44
+  %i4757 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2950, i64 0, i32 3, i32 0), align 2, !tbaa !44
   %i4758 = zext i8 %i4757 to i64
   call fastcc void @transparent_crc(i64 %i4758, ptr @.str.2337, i32 signext undef)
-  %i4759 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2950, i64 0, i32 3, i32 1), align 1, !tbaa !45
+  %i4759 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2950, i64 0, i32 3, i32 1), align 1, !tbaa !45
   %i4760 = sext i8 %i4759 to i64
   call fastcc void @transparent_crc(i64 %i4760, ptr @.str.2338, i32 signext undef)
-  %i4761 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2950, i64 0, i32 3, i32 2), align 2, !tbaa !46
+  %i4761 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2950, i64 0, i32 3, i32 2), align 2, !tbaa !46
   %i4762 = sext i16 %i4761 to i64
   call fastcc void @transparent_crc(i64 %i4762, ptr @.str.2339, i32 signext undef)
-  %i4763 = load i64, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2950, i64 0, i32 3, i32 3), align 2, !tbaa !47
+  %i4763 = load i64, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2950, i64 0, i32 3, i32 3), align 2, !tbaa !47
   call fastcc void @transparent_crc(i64 %i4763, ptr @.str.2340, i32 signext undef)
-  %i4764 = load i32, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2950, i64 0, i32 3, i32 4), align 2, !tbaa !48
+  %i4764 = load i32, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2950, i64 0, i32 3, i32 4), align 2, !tbaa !48
   %i4765 = sext i32 %i4764 to i64
   call fastcc void @transparent_crc(i64 %i4765, ptr @.str.2341, i32 signext undef)
-  %i4766 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2950, i64 0, i32 4, i32 0), align 2
+  %i4766 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2950, i64 0, i32 4, i32 0), align 2
   %i4767 = ashr i128 %i4766, 99
   %i4768 = shl nsw i128 %i4767, 32
   %i4769 = trunc i128 %i4768 to i64
   %i4770 = ashr exact i64 %i4769, 32
   call fastcc void @transparent_crc(i64 %i4770, ptr @.str.2342, i32 signext undef)
-  %i4771 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2950, i64 0, i32 4, i32 0), align 2
+  %i4771 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2950, i64 0, i32 4, i32 0), align 2
   call fastcc void @transparent_crc(i64 undef, ptr @.str.2364, i32 signext undef)
-  %i4772 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2951, i64 0, i32 2, i32 0), align 1
+  %i4772 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2951, i64 0, i32 2, i32 0), align 1
   call fastcc void @transparent_crc(i64 0, ptr @.str.2365, i32 signext undef)
-  %i4773 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2951, i64 0, i32 2, i32 0), align 1
+  %i4773 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2951, i64 0, i32 2, i32 0), align 1
   %i4774 = lshr i120 %i4773, 19
   %i4775 = trunc i120 %i4774 to i64
   %i4776 = and i64 %i4775, 4194303
   call fastcc void @transparent_crc(i64 %i4776, ptr @.str.2366, i32 signext undef)
-  %i4777 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2951, i64 0, i32 2, i32 0), align 1
+  %i4777 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2951, i64 0, i32 2, i32 0), align 1
   %i4778 = shl i120 %i4777, 101
   %i4779 = ashr exact i120 %i4778, 69
   %i4780 = trunc i120 %i4779 to i64
   %i4781 = ashr exact i64 %i4780, 32
   call fastcc void @transparent_crc(i64 %i4781, ptr @.str.2367, i32 signext undef)
   call fastcc void @transparent_crc(i64 0, ptr @.str.2375, i32 signext undef)
-  %i4782 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2951, i64 0, i32 4, i32 0), align 2
+  %i4782 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2951, i64 0, i32 4, i32 0), align 2
   call fastcc void @transparent_crc(i64 undef, ptr @.str.2393, i32 signext undef)
   %i4783 = load volatile i120, ptr undef, align 1
   call fastcc void @transparent_crc(i64 undef, ptr @.str.2394, i32 signext undef)
@@ -9530,7 +9530,7 @@ bb25:                                             ; preds = %bb15
   %i4810 = load i32, ptr undef, align 2, !tbaa !48
   %i4811 = sext i32 %i4810 to i64
   call fastcc void @transparent_crc(i64 %i4811, ptr @.str.2403, i32 signext undef)
-  %i4812 = getelementptr inbounds [8 x [5 x %5]], ptr bitcast (<{ <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, 
i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, 
i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>, <{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }> }>* @g_2952 to ptr), i64 0, i64 0, i64 0, i32 4, i32 0
+  %i4812 = getelementptr inbounds [8 x [5 x %5]], ptr @g_2952, i64 0, i64 0, i64 0, i32 4, i32 0
   %i4813 = load volatile i128, ptr %i4812, align 2
   %i4814 = ashr i128 %i4813, 99
   %i4815 = shl nsw i128 %i4814, 32
@@ -9629,656 +9629,656 @@ bb25:                                             ; preds = %bb15
   %i4892 = load i16, ptr undef, align 2, !tbaa !50
   %i4893 = zext i16 %i4892 to i64
   call fastcc void @transparent_crc(i64 %i4893, ptr @.str.2420, i32 signext undef)
-  %i4894 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2953, i64 0, i32 0), align 2, !tbaa !23
+  %i4894 = load i16, ptr @g_2953, align 2, !tbaa !23
   %i4895 = sext i16 %i4894 to i64
   call fastcc void @transparent_crc(i64 %i4895, ptr @.str.2421, i32 signext undef)
-  %i4896 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2953, i64 0, i32 1), align 2, !tbaa !51
+  %i4896 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2953, i64 0, i32 1), align 2, !tbaa !51
   %i4897 = sext i8 %i4896 to i64
   call fastcc void @transparent_crc(i64 %i4897, ptr @.str.2422, i32 signext undef)
-  %i4898 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2953, i64 0, i32 2, i32 0), align 1
+  %i4898 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2953, i64 0, i32 2, i32 0), align 1
   %i4899 = lshr i120 %i4898, 107
   %i4900 = trunc i120 %i4899 to i64
   call fastcc void @transparent_crc(i64 %i4900, ptr @.str.2423, i32 signext undef)
-  %i4901 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2953, i64 0, i32 2, i32 0), align 1
+  %i4901 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2953, i64 0, i32 2, i32 0), align 1
   %i4902 = lshr i120 %i4901, 78
   %i4903 = trunc i120 %i4902 to i64
   %i4904 = and i64 %i4903, 536870911
   call fastcc void @transparent_crc(i64 %i4904, ptr @.str.2424, i32 signext undef)
-  %i4905 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2953, i64 0, i32 2, i32 0), align 1
+  %i4905 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2953, i64 0, i32 2, i32 0), align 1
   %i4906 = shl i120 %i4905, 42
   %i4907 = ashr i120 %i4906, 104
   %i4908 = shl nsw i120 %i4907, 32
   %i4909 = trunc i120 %i4908 to i64
   %i4910 = ashr exact i64 %i4909, 32
   call fastcc void @transparent_crc(i64 %i4910, ptr @.str.2425, i32 signext undef)
-  %i4911 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2953, i64 0, i32 2, i32 0), align 1
+  %i4911 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2953, i64 0, i32 2, i32 0), align 1
   %i4912 = shl i120 %i4911, 58
   %i4913 = ashr i120 %i4912, 105
   %i4914 = shl nsw i120 %i4913, 32
   %i4915 = trunc i120 %i4914 to i64
   %i4916 = ashr exact i64 %i4915, 32
   call fastcc void @transparent_crc(i64 %i4916, ptr @.str.2426, i32 signext undef)
-  %i4917 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2953, i64 0, i32 2, i32 0), align 1
+  %i4917 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2953, i64 0, i32 2, i32 0), align 1
   %i4918 = lshr i120 %i4917, 41
   %i4919 = trunc i120 %i4918 to i64
   %i4920 = and i64 %i4919, 63
   call fastcc void @transparent_crc(i64 %i4920, ptr @.str.2427, i32 signext undef)
-  %i4921 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2953, i64 0, i32 2, i32 0), align 1
+  %i4921 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2953, i64 0, i32 2, i32 0), align 1
   %i4922 = lshr i120 %i4921, 19
   %i4923 = trunc i120 %i4922 to i64
   %i4924 = and i64 %i4923, 4194303
   call fastcc void @transparent_crc(i64 %i4924, ptr @.str.2428, i32 signext undef)
-  %i4925 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2953, i64 0, i32 2, i32 0), align 1
+  %i4925 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2953, i64 0, i32 2, i32 0), align 1
   %i4926 = shl i120 %i4925, 101
   %i4927 = ashr exact i120 %i4926, 69
   %i4928 = trunc i120 %i4927 to i64
   %i4929 = ashr exact i64 %i4928, 32
   call fastcc void @transparent_crc(i64 %i4929, ptr @.str.2429, i32 signext undef)
-  %i4930 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2953, i64 0, i32 3, i32 0), align 2, !tbaa !44
+  %i4930 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2953, i64 0, i32 3, i32 0), align 2, !tbaa !44
   %i4931 = zext i8 %i4930 to i64
   call fastcc void @transparent_crc(i64 %i4931, ptr @.str.2430, i32 signext undef)
-  %i4932 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2953, i64 0, i32 3, i32 1), align 1, !tbaa !45
+  %i4932 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2953, i64 0, i32 3, i32 1), align 1, !tbaa !45
   %i4933 = sext i8 %i4932 to i64
   call fastcc void @transparent_crc(i64 %i4933, ptr @.str.2431, i32 signext undef)
-  %i4934 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2953, i64 0, i32 3, i32 2), align 2, !tbaa !46
+  %i4934 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2953, i64 0, i32 3, i32 2), align 2, !tbaa !46
   %i4935 = sext i16 %i4934 to i64
   call fastcc void @transparent_crc(i64 %i4935, ptr @.str.2432, i32 signext undef)
-  %i4936 = load i64, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2953, i64 0, i32 3, i32 3), align 2, !tbaa !47
+  %i4936 = load i64, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2953, i64 0, i32 3, i32 3), align 2, !tbaa !47
   call fastcc void @transparent_crc(i64 %i4936, ptr @.str.2433, i32 signext undef)
-  %i4937 = load i32, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2953, i64 0, i32 3, i32 4), align 2, !tbaa !48
+  %i4937 = load i32, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2953, i64 0, i32 3, i32 4), align 2, !tbaa !48
   %i4938 = sext i32 %i4937 to i64
   call fastcc void @transparent_crc(i64 %i4938, ptr @.str.2434, i32 signext undef)
-  %i4939 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2953, i64 0, i32 4, i32 0), align 2
+  %i4939 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2953, i64 0, i32 4, i32 0), align 2
   %i4940 = ashr i128 %i4939, 99
   %i4941 = shl nsw i128 %i4940, 32
   %i4942 = trunc i128 %i4941 to i64
   %i4943 = ashr exact i64 %i4942, 32
   call fastcc void @transparent_crc(i64 %i4943, ptr @.str.2435, i32 signext undef)
-  %i4944 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2953, i64 0, i32 4, i32 0), align 2
+  %i4944 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2953, i64 0, i32 4, i32 0), align 2
   %i4945 = shl i128 %i4944, 29
   %i4946 = ashr i128 %i4945, 97
   %i4947 = shl nsw i128 %i4946, 32
   %i4948 = trunc i128 %i4947 to i64
   %i4949 = ashr exact i64 %i4948, 32
   call fastcc void @transparent_crc(i64 %i4949, ptr @.str.2436, i32 signext undef)
-  %i4950 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2953, i64 0, i32 4, i32 0), align 2
+  %i4950 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2953, i64 0, i32 4, i32 0), align 2
   %i4951 = shl i128 %i4950, 60
   %i4952 = ashr i128 %i4951, 108
   %i4953 = shl nsw i128 %i4952, 32
   %i4954 = trunc i128 %i4953 to i64
   %i4955 = ashr exact i64 %i4954, 32
   call fastcc void @transparent_crc(i64 %i4955, ptr @.str.2437, i32 signext undef)
-  %i4956 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2953, i64 0, i32 4, i32 0), align 2
+  %i4956 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2953, i64 0, i32 4, i32 0), align 2
   %i4957 = shl i128 %i4956, 80
   %i4958 = ashr i128 %i4957, 110
   %i4959 = shl nsw i128 %i4958, 32
   %i4960 = trunc i128 %i4959 to i64
   %i4961 = ashr exact i64 %i4960, 32
   call fastcc void @transparent_crc(i64 %i4961, ptr @.str.2438, i32 signext undef)
-  %i4962 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2953, i64 0, i32 4, i32 0), align 2
+  %i4962 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2953, i64 0, i32 4, i32 0), align 2
   %i4963 = lshr i128 %i4962, 28
   %i4964 = trunc i128 %i4963 to i64
   %i4965 = and i64 %i4964, 3
   call fastcc void @transparent_crc(i64 %i4965, ptr @.str.2439, i32 signext undef)
-  %i4966 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2953, i64 0, i32 4, i32 0), align 2
-  %i4967 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2953, i64 0, i32 5, i32 0, i32 0), align 2
+  %i4966 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2953, i64 0, i32 4, i32 0), align 2
+  %i4967 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2953, i64 0, i32 5, i32 0, i32 0), align 2
   %i4968 = lshr i80 %i4967, 57
   %i4969 = trunc i80 %i4968 to i64
   call fastcc void @transparent_crc(i64 %i4969, ptr @.str.2441, i32 signext undef)
   call fastcc void @transparent_crc(i64 0, ptr @.str.2490, i32 signext undef)
-  %i4970 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2955, i64 0, i32 2, i32 0), align 1
+  %i4970 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2955, i64 0, i32 2, i32 0), align 1
   %i4971 = shl i120 %i4970, 101
   %i4972 = ashr exact i120 %i4971, 69
   %i4973 = trunc i120 %i4972 to i64
   %i4974 = ashr exact i64 %i4973, 32
   call fastcc void @transparent_crc(i64 %i4974, ptr @.str.2491, i32 signext undef)
-  %i4975 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2955, i64 0, i32 3, i32 0), align 2, !tbaa !44
+  %i4975 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2955, i64 0, i32 3, i32 0), align 2, !tbaa !44
   %i4976 = zext i8 %i4975 to i64
   call fastcc void @transparent_crc(i64 %i4976, ptr @.str.2492, i32 signext undef)
-  %i4977 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2955, i64 0, i32 3, i32 1), align 1, !tbaa !45
+  %i4977 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2955, i64 0, i32 3, i32 1), align 1, !tbaa !45
   %i4978 = sext i8 %i4977 to i64
   call fastcc void @transparent_crc(i64 %i4978, ptr @.str.2493, i32 signext undef)
-  %i4979 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2955, i64 0, i32 3, i32 2), align 2, !tbaa !46
+  %i4979 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2955, i64 0, i32 3, i32 2), align 2, !tbaa !46
   %i4980 = sext i16 %i4979 to i64
   call fastcc void @transparent_crc(i64 %i4980, ptr @.str.2494, i32 signext undef)
-  %i4981 = load i64, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2955, i64 0, i32 3, i32 3), align 2, !tbaa !47
+  %i4981 = load i64, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2955, i64 0, i32 3, i32 3), align 2, !tbaa !47
   call fastcc void @transparent_crc(i64 %i4981, ptr @.str.2495, i32 signext undef)
-  %i4982 = load i32, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2955, i64 0, i32 3, i32 4), align 2, !tbaa !48
+  %i4982 = load i32, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2955, i64 0, i32 3, i32 4), align 2, !tbaa !48
   %i4983 = sext i32 %i4982 to i64
   call fastcc void @transparent_crc(i64 %i4983, ptr @.str.2496, i32 signext undef)
-  %i4984 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2955, i64 0, i32 4, i32 0), align 2
+  %i4984 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2955, i64 0, i32 4, i32 0), align 2
   %i4985 = ashr i128 %i4984, 99
   %i4986 = shl nsw i128 %i4985, 32
   %i4987 = trunc i128 %i4986 to i64
   %i4988 = ashr exact i64 %i4987, 32
   call fastcc void @transparent_crc(i64 %i4988, ptr @.str.2497, i32 signext undef)
-  %i4989 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2955, i64 0, i32 4, i32 0), align 2
+  %i4989 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2955, i64 0, i32 4, i32 0), align 2
   %i4990 = shl i128 %i4989, 29
   %i4991 = ashr i128 %i4990, 97
   %i4992 = shl nsw i128 %i4991, 32
   %i4993 = trunc i128 %i4992 to i64
   %i4994 = ashr exact i64 %i4993, 32
   call fastcc void @transparent_crc(i64 %i4994, ptr @.str.2498, i32 signext undef)
-  %i4995 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2955, i64 0, i32 4, i32 0), align 2
-  %i4996 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2955, i64 0, i32 5, i32 1), align 2
+  %i4995 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2955, i64 0, i32 4, i32 0), align 2
+  %i4996 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2955, i64 0, i32 5, i32 1), align 2
   %i4997 = shl i80 %i4996, 69
   %i4998 = ashr i80 %i4997, 72
   %i4999 = shl nsw i80 %i4998, 32
   %i5000 = trunc i80 %i4999 to i64
   %i5001 = ashr exact i64 %i5000, 32
   call fastcc void @transparent_crc(i64 %i5001, ptr @.str.2511, i32 signext undef)
-  %i5002 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2955, i64 0, i32 6), align 2, !tbaa !49
+  %i5002 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2955, i64 0, i32 6), align 2, !tbaa !49
   %i5003 = sext i16 %i5002 to i64
   call fastcc void @transparent_crc(i64 %i5003, ptr @.str.2512, i32 signext undef)
-  %i5004 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2955, i64 0, i32 7), align 2, !tbaa !50
+  %i5004 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2955, i64 0, i32 7), align 2, !tbaa !50
   %i5005 = zext i16 %i5004 to i64
   call fastcc void @transparent_crc(i64 %i5005, ptr @.str.2513, i32 signext undef)
-  %i5006 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2956, i64 0, i32 0), align 2, !tbaa !23
+  %i5006 = load i16, ptr @g_2956, align 2, !tbaa !23
   %i5007 = sext i16 %i5006 to i64
   call fastcc void @transparent_crc(i64 %i5007, ptr @.str.2514, i32 signext undef)
-  %i5008 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2956, i64 0, i32 1), align 2, !tbaa !51
+  %i5008 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2956, i64 0, i32 1), align 2, !tbaa !51
   %i5009 = sext i8 %i5008 to i64
   call fastcc void @transparent_crc(i64 %i5009, ptr @.str.2515, i32 signext undef)
-  %i5010 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2956, i64 0, i32 2, i32 0), align 1
+  %i5010 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2956, i64 0, i32 2, i32 0), align 1
   %i5011 = lshr i120 %i5010, 107
   %i5012 = trunc i120 %i5011 to i64
   call fastcc void @transparent_crc(i64 %i5012, ptr @.str.2516, i32 signext undef)
-  %i5013 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2956, i64 0, i32 2, i32 0), align 1
+  %i5013 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2956, i64 0, i32 2, i32 0), align 1
   %i5014 = lshr i120 %i5013, 78
   %i5015 = trunc i120 %i5014 to i64
   %i5016 = and i64 %i5015, 536870911
   call fastcc void @transparent_crc(i64 %i5016, ptr @.str.2517, i32 signext undef)
-  %i5017 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2956, i64 0, i32 2, i32 0), align 1
+  %i5017 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2956, i64 0, i32 2, i32 0), align 1
   %i5018 = shl i120 %i5017, 42
   %i5019 = ashr i120 %i5018, 104
   %i5020 = shl nsw i120 %i5019, 32
   %i5021 = trunc i120 %i5020 to i64
   %i5022 = ashr exact i64 %i5021, 32
   call fastcc void @transparent_crc(i64 %i5022, ptr @.str.2518, i32 signext undef)
-  %i5023 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2956, i64 0, i32 2, i32 0), align 1
+  %i5023 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2956, i64 0, i32 2, i32 0), align 1
   %i5024 = shl i120 %i5023, 58
   %i5025 = ashr i120 %i5024, 105
   %i5026 = shl nsw i120 %i5025, 32
   %i5027 = trunc i120 %i5026 to i64
   %i5028 = ashr exact i64 %i5027, 32
   call fastcc void @transparent_crc(i64 %i5028, ptr @.str.2519, i32 signext undef)
-  %i5029 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2956, i64 0, i32 2, i32 0), align 1
+  %i5029 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2956, i64 0, i32 2, i32 0), align 1
   %i5030 = lshr i120 %i5029, 41
   %i5031 = trunc i120 %i5030 to i64
   %i5032 = and i64 %i5031, 63
   call fastcc void @transparent_crc(i64 %i5032, ptr @.str.2520, i32 signext undef)
-  %i5033 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2956, i64 0, i32 2, i32 0), align 1
+  %i5033 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2956, i64 0, i32 2, i32 0), align 1
   %i5034 = lshr i120 %i5033, 19
   %i5035 = trunc i120 %i5034 to i64
   %i5036 = and i64 %i5035, 4194303
   call fastcc void @transparent_crc(i64 %i5036, ptr @.str.2521, i32 signext undef)
-  %i5037 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2956, i64 0, i32 2, i32 0), align 1
+  %i5037 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2956, i64 0, i32 2, i32 0), align 1
   %i5038 = shl i120 %i5037, 101
   %i5039 = ashr exact i120 %i5038, 69
   %i5040 = trunc i120 %i5039 to i64
   %i5041 = ashr exact i64 %i5040, 32
   call fastcc void @transparent_crc(i64 %i5041, ptr @.str.2522, i32 signext undef)
-  %i5042 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2956, i64 0, i32 3, i32 0), align 2, !tbaa !44
+  %i5042 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2956, i64 0, i32 3, i32 0), align 2, !tbaa !44
   %i5043 = zext i8 %i5042 to i64
   call fastcc void @transparent_crc(i64 %i5043, ptr @.str.2523, i32 signext undef)
-  %i5044 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2956, i64 0, i32 3, i32 1), align 1, !tbaa !45
+  %i5044 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2956, i64 0, i32 3, i32 1), align 1, !tbaa !45
   %i5045 = sext i8 %i5044 to i64
   call fastcc void @transparent_crc(i64 %i5045, ptr @.str.2524, i32 signext undef)
-  %i5046 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2956, i64 0, i32 3, i32 2), align 2, !tbaa !46
+  %i5046 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2956, i64 0, i32 3, i32 2), align 2, !tbaa !46
   %i5047 = sext i16 %i5046 to i64
   call fastcc void @transparent_crc(i64 %i5047, ptr @.str.2525, i32 signext undef)
-  %i5048 = load i64, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2956, i64 0, i32 3, i32 3), align 2, !tbaa !47
+  %i5048 = load i64, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2956, i64 0, i32 3, i32 3), align 2, !tbaa !47
   call fastcc void @transparent_crc(i64 %i5048, ptr @.str.2526, i32 signext undef)
   call fastcc void @transparent_crc(i64 0, ptr @.str.2527, i32 signext undef)
-  %i5049 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2956, i64 0, i32 4, i32 0), align 2
+  %i5049 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2956, i64 0, i32 4, i32 0), align 2
   %i5050 = ashr i128 %i5049, 99
   %i5051 = shl nsw i128 %i5050, 32
   %i5052 = trunc i128 %i5051 to i64
   %i5053 = ashr exact i64 %i5052, 32
   call fastcc void @transparent_crc(i64 %i5053, ptr @.str.2528, i32 signext undef)
-  %i5054 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2956, i64 0, i32 4, i32 0), align 2
+  %i5054 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2956, i64 0, i32 4, i32 0), align 2
   %i5055 = shl i128 %i5054, 29
   %i5056 = ashr i128 %i5055, 97
   %i5057 = shl nsw i128 %i5056, 32
   %i5058 = trunc i128 %i5057 to i64
   %i5059 = ashr exact i64 %i5058, 32
   call fastcc void @transparent_crc(i64 %i5059, ptr @.str.2529, i32 signext undef)
-  %i5060 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2956, i64 0, i32 4, i32 0), align 2
+  %i5060 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2956, i64 0, i32 4, i32 0), align 2
   %i5061 = shl i128 %i5060, 60
   %i5062 = ashr i128 %i5061, 108
   %i5063 = shl nsw i128 %i5062, 32
   %i5064 = trunc i128 %i5063 to i64
   %i5065 = ashr exact i64 %i5064, 32
   call fastcc void @transparent_crc(i64 %i5065, ptr @.str.2530, i32 signext undef)
-  %i5066 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2956, i64 0, i32 4, i32 0), align 2
+  %i5066 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2956, i64 0, i32 4, i32 0), align 2
   %i5067 = shl i128 %i5066, 80
   %i5068 = ashr i128 %i5067, 110
   %i5069 = shl nsw i128 %i5068, 32
   %i5070 = trunc i128 %i5069 to i64
   %i5071 = ashr exact i64 %i5070, 32
   call fastcc void @transparent_crc(i64 %i5071, ptr @.str.2531, i32 signext undef)
-  %i5072 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2956, i64 0, i32 4, i32 0), align 2
+  %i5072 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2956, i64 0, i32 4, i32 0), align 2
   %i5073 = lshr i128 %i5072, 28
   %i5074 = trunc i128 %i5073 to i64
   %i5075 = and i64 %i5074, 3
   call fastcc void @transparent_crc(i64 %i5075, ptr @.str.2532, i32 signext undef)
-  %i5076 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2956, i64 0, i32 4, i32 0), align 2
+  %i5076 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2956, i64 0, i32 4, i32 0), align 2
   %i5077 = shl i128 %i5076, 100
   %i5078 = ashr i128 %i5077, 107
   %i5079 = shl nsw i128 %i5078, 32
   %i5080 = trunc i128 %i5079 to i64
   %i5081 = ashr exact i64 %i5080, 32
   call fastcc void @transparent_crc(i64 %i5081, ptr @.str.2533, i32 signext undef)
-  %i5082 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2956, i64 0, i32 5, i32 0, i32 0), align 2
+  %i5082 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2956, i64 0, i32 5, i32 0, i32 0), align 2
   %i5083 = lshr i80 %i5082, 57
   %i5084 = trunc i80 %i5083 to i64
   call fastcc void @transparent_crc(i64 %i5084, ptr @.str.2534, i32 signext undef)
-  %i5085 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2956, i64 0, i32 5, i32 0, i32 0), align 2
+  %i5085 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2956, i64 0, i32 5, i32 0, i32 0), align 2
   %i5086 = shl i80 %i5085, 23
   %i5087 = ashr i80 %i5086, 64
   %i5088 = shl nsw i80 %i5087, 32
   %i5089 = trunc i80 %i5088 to i64
   %i5090 = ashr exact i64 %i5089, 32
   call fastcc void @transparent_crc(i64 %i5090, ptr @.str.2535, i32 signext undef)
-  %i5091 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2956, i64 0, i32 5, i32 0, i32 0), align 2
+  %i5091 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2956, i64 0, i32 5, i32 0, i32 0), align 2
   call fastcc void @transparent_crc(i64 undef, ptr @.str.2556, i32 signext undef)
-  %i5092 = load i64, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2957, i64 0, i32 3, i32 3), align 2, !tbaa !47
+  %i5092 = load i64, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2957, i64 0, i32 3, i32 3), align 2, !tbaa !47
   call fastcc void @transparent_crc(i64 %i5092, ptr @.str.2557, i32 signext undef)
-  %i5093 = load i32, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2957, i64 0, i32 3, i32 4), align 2, !tbaa !48
+  %i5093 = load i32, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2957, i64 0, i32 3, i32 4), align 2, !tbaa !48
   %i5094 = sext i32 %i5093 to i64
   call fastcc void @transparent_crc(i64 %i5094, ptr @.str.2558, i32 signext undef)
-  %i5095 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2957, i64 0, i32 4, i32 0), align 2
+  %i5095 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2957, i64 0, i32 4, i32 0), align 2
   call fastcc void @transparent_crc(i64 undef, ptr @.str.2559, i32 signext undef)
-  %i5096 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2957, i64 0, i32 4, i32 0), align 2
+  %i5096 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2957, i64 0, i32 4, i32 0), align 2
   %i5097 = shl i128 %i5096, 29
   %i5098 = ashr i128 %i5097, 97
   %i5099 = shl nsw i128 %i5098, 32
   %i5100 = trunc i128 %i5099 to i64
   %i5101 = ashr exact i64 %i5100, 32
   call fastcc void @transparent_crc(i64 %i5101, ptr @.str.2560, i32 signext undef)
-  %i5102 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2957, i64 0, i32 4, i32 0), align 2
+  %i5102 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2957, i64 0, i32 4, i32 0), align 2
   %i5103 = shl i128 %i5102, 60
   %i5104 = ashr i128 %i5103, 108
   %i5105 = shl nsw i128 %i5104, 32
   %i5106 = trunc i128 %i5105 to i64
   %i5107 = ashr exact i64 %i5106, 32
   call fastcc void @transparent_crc(i64 %i5107, ptr @.str.2561, i32 signext undef)
-  %i5108 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2957, i64 0, i32 4, i32 0), align 2
+  %i5108 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2957, i64 0, i32 4, i32 0), align 2
   %i5109 = shl i128 %i5108, 80
   %i5110 = ashr i128 %i5109, 110
   %i5111 = shl nsw i128 %i5110, 32
   %i5112 = trunc i128 %i5111 to i64
   %i5113 = ashr exact i64 %i5112, 32
   call fastcc void @transparent_crc(i64 %i5113, ptr @.str.2562, i32 signext undef)
-  %i5114 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2957, i64 0, i32 4, i32 0), align 2
+  %i5114 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2957, i64 0, i32 4, i32 0), align 2
   %i5115 = lshr i128 %i5114, 28
   %i5116 = trunc i128 %i5115 to i64
   %i5117 = and i64 %i5116, 3
   call fastcc void @transparent_crc(i64 %i5117, ptr @.str.2563, i32 signext undef)
-  %i5118 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2957, i64 0, i32 4, i32 0), align 2
+  %i5118 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2957, i64 0, i32 4, i32 0), align 2
   %i5119 = shl i128 %i5118, 100
   %i5120 = ashr i128 %i5119, 107
   %i5121 = shl nsw i128 %i5120, 32
   %i5122 = trunc i128 %i5121 to i64
   %i5123 = ashr exact i64 %i5122, 32
   call fastcc void @transparent_crc(i64 %i5123, ptr @.str.2564, i32 signext undef)
-  %i5124 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2957, i64 0, i32 5, i32 0, i32 0), align 2
+  %i5124 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2957, i64 0, i32 5, i32 0, i32 0), align 2
   %i5125 = lshr i80 %i5124, 57
   %i5126 = trunc i80 %i5125 to i64
   call fastcc void @transparent_crc(i64 %i5126, ptr @.str.2565, i32 signext undef)
-  %i5127 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2957, i64 0, i32 5, i32 0, i32 0), align 2
+  %i5127 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2957, i64 0, i32 5, i32 0, i32 0), align 2
   %i5128 = shl i80 %i5127, 23
   %i5129 = ashr i80 %i5128, 64
   %i5130 = shl nsw i80 %i5129, 32
   %i5131 = trunc i80 %i5130 to i64
   %i5132 = ashr exact i64 %i5131, 32
   call fastcc void @transparent_crc(i64 %i5132, ptr @.str.2566, i32 signext undef)
-  %i5133 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2957, i64 0, i32 5, i32 0, i32 0), align 2
+  %i5133 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2957, i64 0, i32 5, i32 0, i32 0), align 2
   %i5134 = shl i80 %i5133, 39
   %i5135 = ashr i80 %i5134, 62
   %i5136 = shl nsw i80 %i5135, 32
   %i5137 = trunc i80 %i5136 to i64
   %i5138 = ashr exact i64 %i5137, 32
   call fastcc void @transparent_crc(i64 %i5138, ptr @.str.2567, i32 signext undef)
-  %i5139 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2957, i64 0, i32 5, i32 0, i32 0), align 2
+  %i5139 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2957, i64 0, i32 5, i32 0, i32 0), align 2
   %i5140 = shl i80 %i5139, 57
   %i5141 = ashr i80 %i5140, 58
   %i5142 = shl nsw i80 %i5141, 32
   %i5143 = trunc i80 %i5142 to i64
   %i5144 = ashr exact i64 %i5143, 32
   call fastcc void @transparent_crc(i64 %i5144, ptr @.str.2568, i32 signext undef)
-  %i5145 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2957, i64 0, i32 5, i32 1), align 2
+  %i5145 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2957, i64 0, i32 5, i32 1), align 2
   %i5146 = lshr i80 %i5145, 49
   %i5147 = trunc i80 %i5146 to i64
   call fastcc void @transparent_crc(i64 %i5147, ptr @.str.2569, i32 signext undef)
-  %i5148 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2957, i64 0, i32 5, i32 1), align 2
+  %i5148 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2957, i64 0, i32 5, i32 1), align 2
   %i5149 = lshr i80 %i5148, 24
   %i5150 = trunc i80 %i5149 to i64
   %i5151 = and i64 %i5150, 33554431
   call fastcc void @transparent_crc(i64 %i5151, ptr @.str.2570, i32 signext undef)
-  %i5152 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2957, i64 0, i32 5, i32 1), align 2
+  %i5152 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2957, i64 0, i32 5, i32 1), align 2
   %i5153 = shl i80 %i5152, 56
   %i5154 = ashr i80 %i5153, 68
   %i5155 = shl nsw i80 %i5154, 32
   %i5156 = trunc i80 %i5155 to i64
   %i5157 = ashr exact i64 %i5156, 32
   call fastcc void @transparent_crc(i64 %i5157, ptr @.str.2571, i32 signext undef)
-  %i5158 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2957, i64 0, i32 5, i32 1), align 2
+  %i5158 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2957, i64 0, i32 5, i32 1), align 2
   %i5159 = lshr i80 %i5158, 11
   %i5160 = trunc i80 %i5159 to i64
   %i5161 = and i64 %i5160, 1
   call fastcc void @transparent_crc(i64 %i5161, ptr @.str.2572, i32 signext undef)
-  %i5162 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2957, i64 0, i32 5, i32 1), align 2
+  %i5162 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2957, i64 0, i32 5, i32 1), align 2
   %i5163 = shl i80 %i5162, 69
   %i5164 = ashr i80 %i5163, 72
   %i5165 = shl nsw i80 %i5164, 32
   %i5166 = trunc i80 %i5165 to i64
   %i5167 = ashr exact i64 %i5166, 32
   call fastcc void @transparent_crc(i64 %i5167, ptr @.str.2573, i32 signext undef)
-  %i5168 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2957, i64 0, i32 6), align 2, !tbaa !49
+  %i5168 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2957, i64 0, i32 6), align 2, !tbaa !49
   %i5169 = sext i16 %i5168 to i64
   call fastcc void @transparent_crc(i64 %i5169, ptr @.str.2574, i32 signext undef)
-  %i5170 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2957, i64 0, i32 7), align 2, !tbaa !50
+  %i5170 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2957, i64 0, i32 7), align 2, !tbaa !50
   %i5171 = zext i16 %i5170 to i64
   call fastcc void @transparent_crc(i64 %i5171, ptr @.str.2575, i32 signext undef)
-  %i5172 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2958, i64 0, i32 0), align 2, !tbaa !23
+  %i5172 = load i16, ptr @g_2958, align 2, !tbaa !23
   %i5173 = sext i16 %i5172 to i64
   call fastcc void @transparent_crc(i64 %i5173, ptr @.str.2576, i32 signext undef)
-  %i5174 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2958, i64 0, i32 1), align 2, !tbaa !51
+  %i5174 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2958, i64 0, i32 1), align 2, !tbaa !51
   %i5175 = sext i8 %i5174 to i64
   call fastcc void @transparent_crc(i64 %i5175, ptr @.str.2577, i32 signext undef)
-  %i5176 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2958, i64 0, i32 2, i32 0), align 1
+  %i5176 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2958, i64 0, i32 2, i32 0), align 1
   %i5177 = lshr i120 %i5176, 107
   %i5178 = trunc i120 %i5177 to i64
   call fastcc void @transparent_crc(i64 %i5178, ptr @.str.2578, i32 signext undef)
-  %i5179 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2958, i64 0, i32 2, i32 0), align 1
+  %i5179 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2958, i64 0, i32 2, i32 0), align 1
   %i5180 = lshr i120 %i5179, 78
   %i5181 = trunc i120 %i5180 to i64
   %i5182 = and i64 %i5181, 536870911
   call fastcc void @transparent_crc(i64 %i5182, ptr @.str.2579, i32 signext undef)
-  %i5183 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2958, i64 0, i32 2, i32 0), align 1
+  %i5183 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2958, i64 0, i32 2, i32 0), align 1
   %i5184 = shl i120 %i5183, 42
   %i5185 = ashr i120 %i5184, 104
   %i5186 = shl nsw i120 %i5185, 32
   %i5187 = trunc i120 %i5186 to i64
   %i5188 = ashr exact i64 %i5187, 32
   call fastcc void @transparent_crc(i64 %i5188, ptr @.str.2580, i32 signext undef)
-  %i5189 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2958, i64 0, i32 2, i32 0), align 1
+  %i5189 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2958, i64 0, i32 2, i32 0), align 1
   %i5190 = shl i120 %i5189, 58
   %i5191 = ashr i120 %i5190, 105
   %i5192 = shl nsw i120 %i5191, 32
   %i5193 = trunc i120 %i5192 to i64
   %i5194 = ashr exact i64 %i5193, 32
   call fastcc void @transparent_crc(i64 %i5194, ptr @.str.2581, i32 signext undef)
-  %i5195 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2958, i64 0, i32 2, i32 0), align 1
+  %i5195 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2958, i64 0, i32 2, i32 0), align 1
   %i5196 = lshr i120 %i5195, 41
   %i5197 = trunc i120 %i5196 to i64
   %i5198 = and i64 %i5197, 63
   call fastcc void @transparent_crc(i64 %i5198, ptr @.str.2582, i32 signext undef)
-  %i5199 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2958, i64 0, i32 2, i32 0), align 1
+  %i5199 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2958, i64 0, i32 2, i32 0), align 1
   %i5200 = lshr i120 %i5199, 19
   %i5201 = trunc i120 %i5200 to i64
   %i5202 = and i64 %i5201, 4194303
   call fastcc void @transparent_crc(i64 %i5202, ptr @.str.2583, i32 signext undef)
-  %i5203 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2958, i64 0, i32 2, i32 0), align 1
+  %i5203 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2958, i64 0, i32 2, i32 0), align 1
   %i5204 = shl i120 %i5203, 101
   %i5205 = ashr exact i120 %i5204, 69
   %i5206 = trunc i120 %i5205 to i64
   %i5207 = ashr exact i64 %i5206, 32
   call fastcc void @transparent_crc(i64 %i5207, ptr @.str.2584, i32 signext undef)
-  %i5208 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2958, i64 0, i32 3, i32 0), align 2, !tbaa !44
+  %i5208 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2958, i64 0, i32 3, i32 0), align 2, !tbaa !44
   %i5209 = zext i8 %i5208 to i64
   call fastcc void @transparent_crc(i64 %i5209, ptr @.str.2585, i32 signext undef)
-  %i5210 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2958, i64 0, i32 3, i32 1), align 1, !tbaa !45
+  %i5210 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2958, i64 0, i32 3, i32 1), align 1, !tbaa !45
   %i5211 = sext i8 %i5210 to i64
   call fastcc void @transparent_crc(i64 %i5211, ptr @.str.2586, i32 signext undef)
-  %i5212 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2958, i64 0, i32 3, i32 2), align 2, !tbaa !46
+  %i5212 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2958, i64 0, i32 3, i32 2), align 2, !tbaa !46
   %i5213 = sext i16 %i5212 to i64
   call fastcc void @transparent_crc(i64 %i5213, ptr @.str.2587, i32 signext undef)
-  %i5214 = load i64, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2958, i64 0, i32 3, i32 3), align 2, !tbaa !47
+  %i5214 = load i64, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2958, i64 0, i32 3, i32 3), align 2, !tbaa !47
   call fastcc void @transparent_crc(i64 %i5214, ptr @.str.2588, i32 signext undef)
-  %i5215 = load i32, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2958, i64 0, i32 3, i32 4), align 2, !tbaa !48
+  %i5215 = load i32, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2958, i64 0, i32 3, i32 4), align 2, !tbaa !48
   %i5216 = sext i32 %i5215 to i64
   call fastcc void @transparent_crc(i64 %i5216, ptr @.str.2589, i32 signext undef)
-  %i5217 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2958, i64 0, i32 4, i32 0), align 2
+  %i5217 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2958, i64 0, i32 4, i32 0), align 2
   %i5218 = ashr i128 %i5217, 99
   %i5219 = shl nsw i128 %i5218, 32
   %i5220 = trunc i128 %i5219 to i64
   %i5221 = ashr exact i64 %i5220, 32
   call fastcc void @transparent_crc(i64 %i5221, ptr @.str.2590, i32 signext undef)
-  %i5222 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2958, i64 0, i32 4, i32 0), align 2
+  %i5222 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2958, i64 0, i32 4, i32 0), align 2
   %i5223 = shl i128 %i5222, 29
   %i5224 = ashr i128 %i5223, 97
   %i5225 = shl nsw i128 %i5224, 32
   %i5226 = trunc i128 %i5225 to i64
   %i5227 = ashr exact i64 %i5226, 32
   call fastcc void @transparent_crc(i64 %i5227, ptr @.str.2591, i32 signext undef)
-  %i5228 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2958, i64 0, i32 4, i32 0), align 2
+  %i5228 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2958, i64 0, i32 4, i32 0), align 2
   %i5229 = shl i128 %i5228, 60
   %i5230 = ashr i128 %i5229, 108
   %i5231 = shl nsw i128 %i5230, 32
   %i5232 = trunc i128 %i5231 to i64
   %i5233 = ashr exact i64 %i5232, 32
   call fastcc void @transparent_crc(i64 %i5233, ptr @.str.2592, i32 signext undef)
-  %i5234 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2958, i64 0, i32 4, i32 0), align 2
+  %i5234 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2958, i64 0, i32 4, i32 0), align 2
   %i5235 = shl i128 %i5234, 80
   %i5236 = ashr i128 %i5235, 110
   %i5237 = shl nsw i128 %i5236, 32
   %i5238 = trunc i128 %i5237 to i64
   %i5239 = ashr exact i64 %i5238, 32
   call fastcc void @transparent_crc(i64 %i5239, ptr @.str.2593, i32 signext undef)
-  %i5240 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2958, i64 0, i32 4, i32 0), align 2
+  %i5240 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2958, i64 0, i32 4, i32 0), align 2
   %i5241 = lshr i128 %i5240, 28
   %i5242 = trunc i128 %i5241 to i64
   %i5243 = and i64 %i5242, 3
   call fastcc void @transparent_crc(i64 %i5243, ptr @.str.2594, i32 signext undef)
-  %i5244 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2958, i64 0, i32 4, i32 0), align 2
+  %i5244 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2958, i64 0, i32 4, i32 0), align 2
   %i5245 = shl i128 %i5244, 100
   %i5246 = ashr i128 %i5245, 107
   %i5247 = shl nsw i128 %i5246, 32
   %i5248 = trunc i128 %i5247 to i64
   %i5249 = ashr exact i64 %i5248, 32
   call fastcc void @transparent_crc(i64 %i5249, ptr @.str.2595, i32 signext undef)
-  %i5250 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2958, i64 0, i32 5, i32 0, i32 0), align 2
+  %i5250 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2958, i64 0, i32 5, i32 0, i32 0), align 2
   %i5251 = lshr i80 %i5250, 57
   %i5252 = trunc i80 %i5251 to i64
   call fastcc void @transparent_crc(i64 %i5252, ptr @.str.2596, i32 signext undef)
-  %i5253 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2958, i64 0, i32 5, i32 0, i32 0), align 2
+  %i5253 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2958, i64 0, i32 5, i32 0, i32 0), align 2
   %i5254 = shl i80 %i5253, 23
   %i5255 = ashr i80 %i5254, 64
   %i5256 = shl nsw i80 %i5255, 32
   %i5257 = trunc i80 %i5256 to i64
   %i5258 = ashr exact i64 %i5257, 32
   call fastcc void @transparent_crc(i64 %i5258, ptr @.str.2597, i32 signext undef)
-  %i5259 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2958, i64 0, i32 5, i32 0, i32 0), align 2
+  %i5259 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2958, i64 0, i32 5, i32 0, i32 0), align 2
   %i5260 = shl i80 %i5259, 39
   %i5261 = ashr i80 %i5260, 62
   %i5262 = shl nsw i80 %i5261, 32
   %i5263 = trunc i80 %i5262 to i64
   %i5264 = ashr exact i64 %i5263, 32
   call fastcc void @transparent_crc(i64 %i5264, ptr @.str.2598, i32 signext undef)
-  %i5265 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2958, i64 0, i32 5, i32 0, i32 0), align 2
+  %i5265 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2958, i64 0, i32 5, i32 0, i32 0), align 2
   %i5266 = shl i80 %i5265, 57
   %i5267 = ashr i80 %i5266, 58
   %i5268 = shl nsw i80 %i5267, 32
   %i5269 = trunc i80 %i5268 to i64
   %i5270 = ashr exact i64 %i5269, 32
   call fastcc void @transparent_crc(i64 %i5270, ptr @.str.2599, i32 signext undef)
-  %i5271 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2958, i64 0, i32 5, i32 1), align 2
+  %i5271 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2958, i64 0, i32 5, i32 1), align 2
   %i5272 = lshr i80 %i5271, 49
   %i5273 = trunc i80 %i5272 to i64
   call fastcc void @transparent_crc(i64 %i5273, ptr @.str.2600, i32 signext undef)
-  %i5274 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2958, i64 0, i32 5, i32 1), align 2
+  %i5274 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2958, i64 0, i32 5, i32 1), align 2
   %i5275 = lshr i80 %i5274, 24
   %i5276 = trunc i80 %i5275 to i64
   %i5277 = and i64 %i5276, 33554431
   call fastcc void @transparent_crc(i64 %i5277, ptr @.str.2601, i32 signext undef)
   call fastcc void @transparent_crc(i64 undef, ptr @.str.2602, i32 signext undef)
-  %i5278 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2958, i64 0, i32 5, i32 1), align 2
+  %i5278 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2958, i64 0, i32 5, i32 1), align 2
   %i5279 = lshr i80 %i5278, 11
   %i5280 = trunc i80 %i5279 to i64
   %i5281 = and i64 %i5280, 1
   call fastcc void @transparent_crc(i64 %i5281, ptr @.str.2603, i32 signext undef)
-  %i5282 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2958, i64 0, i32 5, i32 1), align 2
+  %i5282 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2958, i64 0, i32 5, i32 1), align 2
   %i5283 = shl i80 %i5282, 69
   %i5284 = ashr i80 %i5283, 72
   %i5285 = shl nsw i80 %i5284, 32
   %i5286 = trunc i80 %i5285 to i64
   %i5287 = ashr exact i64 %i5286, 32
   call fastcc void @transparent_crc(i64 %i5287, ptr @.str.2604, i32 signext undef)
-  %i5288 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2958, i64 0, i32 6), align 2, !tbaa !49
+  %i5288 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2958, i64 0, i32 6), align 2, !tbaa !49
   %i5289 = sext i16 %i5288 to i64
   call fastcc void @transparent_crc(i64 %i5289, ptr @.str.2605, i32 signext undef)
-  %i5290 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2958, i64 0, i32 7), align 2, !tbaa !50
+  %i5290 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2958, i64 0, i32 7), align 2, !tbaa !50
   %i5291 = zext i16 %i5290 to i64
   call fastcc void @transparent_crc(i64 %i5291, ptr @.str.2606, i32 signext undef)
-  %i5292 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2959, i64 0, i32 0), align 2, !tbaa !23
+  %i5292 = load i16, ptr @g_2959, align 2, !tbaa !23
   %i5293 = sext i16 %i5292 to i64
   call fastcc void @transparent_crc(i64 %i5293, ptr @.str.2607, i32 signext undef)
-  %i5294 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2959, i64 0, i32 1), align 2, !tbaa !51
+  %i5294 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2959, i64 0, i32 1), align 2, !tbaa !51
   %i5295 = sext i8 %i5294 to i64
   call fastcc void @transparent_crc(i64 %i5295, ptr @.str.2608, i32 signext undef)
-  %i5296 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2959, i64 0, i32 2, i32 0), align 1
+  %i5296 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2959, i64 0, i32 2, i32 0), align 1
   %i5297 = lshr i120 %i5296, 107
   %i5298 = trunc i120 %i5297 to i64
   call fastcc void @transparent_crc(i64 %i5298, ptr @.str.2609, i32 signext undef)
-  %i5299 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2959, i64 0, i32 2, i32 0), align 1
+  %i5299 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2959, i64 0, i32 2, i32 0), align 1
   %i5300 = lshr i120 %i5299, 78
   %i5301 = trunc i120 %i5300 to i64
   %i5302 = and i64 %i5301, 536870911
   call fastcc void @transparent_crc(i64 %i5302, ptr @.str.2610, i32 signext undef)
-  %i5303 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2959, i64 0, i32 2, i32 0), align 1
+  %i5303 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2959, i64 0, i32 2, i32 0), align 1
   call fastcc void @transparent_crc(i64 0, ptr @.str.2634, i32 signext undef)
-  %i5304 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2959, i64 0, i32 5, i32 1), align 2
+  %i5304 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2959, i64 0, i32 5, i32 1), align 2
   %i5305 = shl i80 %i5304, 69
   %i5306 = ashr i80 %i5305, 72
   %i5307 = shl nsw i80 %i5306, 32
   %i5308 = trunc i80 %i5307 to i64
   %i5309 = ashr exact i64 %i5308, 32
   call fastcc void @transparent_crc(i64 %i5309, ptr @.str.2635, i32 signext undef)
-  %i5310 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2959, i64 0, i32 6), align 2, !tbaa !49
+  %i5310 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2959, i64 0, i32 6), align 2, !tbaa !49
   %i5311 = sext i16 %i5310 to i64
   call fastcc void @transparent_crc(i64 %i5311, ptr @.str.2636, i32 signext undef)
-  %i5312 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2959, i64 0, i32 7), align 2, !tbaa !50
+  %i5312 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2959, i64 0, i32 7), align 2, !tbaa !50
   %i5313 = zext i16 %i5312 to i64
   call fastcc void @transparent_crc(i64 %i5313, ptr @.str.2637, i32 signext undef)
-  %i5314 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2960, i64 0, i32 0), align 2, !tbaa !23
+  %i5314 = load i16, ptr @g_2960, align 2, !tbaa !23
   %i5315 = sext i16 %i5314 to i64
   call fastcc void @transparent_crc(i64 %i5315, ptr @.str.2638, i32 signext undef)
-  %i5316 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2960, i64 0, i32 1), align 2, !tbaa !51
+  %i5316 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2960, i64 0, i32 1), align 2, !tbaa !51
   %i5317 = sext i8 %i5316 to i64
   call fastcc void @transparent_crc(i64 %i5317, ptr @.str.2639, i32 signext undef)
-  %i5318 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2960, i64 0, i32 2, i32 0), align 1
+  %i5318 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2960, i64 0, i32 2, i32 0), align 1
   %i5319 = lshr i120 %i5318, 107
   %i5320 = trunc i120 %i5319 to i64
   call fastcc void @transparent_crc(i64 %i5320, ptr @.str.2640, i32 signext undef)
-  %i5321 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2960, i64 0, i32 2, i32 0), align 1
+  %i5321 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2960, i64 0, i32 2, i32 0), align 1
   %i5322 = lshr i120 %i5321, 78
   %i5323 = trunc i120 %i5322 to i64
   %i5324 = and i64 %i5323, 536870911
   call fastcc void @transparent_crc(i64 %i5324, ptr @.str.2641, i32 signext undef)
-  %i5325 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2960, i64 0, i32 2, i32 0), align 1
+  %i5325 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2960, i64 0, i32 2, i32 0), align 1
   %i5326 = shl i120 %i5325, 42
   %i5327 = ashr i120 %i5326, 104
   %i5328 = shl nsw i120 %i5327, 32
   %i5329 = trunc i120 %i5328 to i64
   %i5330 = ashr exact i64 %i5329, 32
   call fastcc void @transparent_crc(i64 %i5330, ptr @.str.2642, i32 signext undef)
-  %i5331 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2960, i64 0, i32 2, i32 0), align 1
+  %i5331 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2960, i64 0, i32 2, i32 0), align 1
   %i5332 = shl i120 %i5331, 58
   %i5333 = ashr i120 %i5332, 105
   %i5334 = shl nsw i120 %i5333, 32
   %i5335 = trunc i120 %i5334 to i64
   %i5336 = ashr exact i64 %i5335, 32
   call fastcc void @transparent_crc(i64 %i5336, ptr @.str.2643, i32 signext undef)
-  %i5337 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2960, i64 0, i32 2, i32 0), align 1
+  %i5337 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2960, i64 0, i32 2, i32 0), align 1
   %i5338 = lshr i120 %i5337, 41
   %i5339 = trunc i120 %i5338 to i64
   %i5340 = and i64 %i5339, 63
   call fastcc void @transparent_crc(i64 %i5340, ptr @.str.2644, i32 signext undef)
-  %i5341 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2960, i64 0, i32 2, i32 0), align 1
+  %i5341 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2960, i64 0, i32 2, i32 0), align 1
   %i5342 = lshr i120 %i5341, 19
   %i5343 = trunc i120 %i5342 to i64
   %i5344 = and i64 %i5343, 4194303
   call fastcc void @transparent_crc(i64 %i5344, ptr @.str.2645, i32 signext undef)
-  %i5345 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2960, i64 0, i32 2, i32 0), align 1
+  %i5345 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2960, i64 0, i32 2, i32 0), align 1
   %i5346 = shl i120 %i5345, 101
   %i5347 = ashr exact i120 %i5346, 69
   %i5348 = trunc i120 %i5347 to i64
   %i5349 = ashr exact i64 %i5348, 32
   call fastcc void @transparent_crc(i64 %i5349, ptr @.str.2646, i32 signext undef)
-  %i5350 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2960, i64 0, i32 3, i32 0), align 2, !tbaa !44
+  %i5350 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2960, i64 0, i32 3, i32 0), align 2, !tbaa !44
   %i5351 = zext i8 %i5350 to i64
   call fastcc void @transparent_crc(i64 %i5351, ptr @.str.2647, i32 signext undef)
-  %i5352 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2960, i64 0, i32 3, i32 1), align 1, !tbaa !45
+  %i5352 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2960, i64 0, i32 3, i32 1), align 1, !tbaa !45
   %i5353 = sext i8 %i5352 to i64
   call fastcc void @transparent_crc(i64 %i5353, ptr @.str.2648, i32 signext undef)
-  %i5354 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2960, i64 0, i32 3, i32 2), align 2, !tbaa !46
+  %i5354 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2960, i64 0, i32 3, i32 2), align 2, !tbaa !46
   %i5355 = sext i16 %i5354 to i64
   call fastcc void @transparent_crc(i64 %i5355, ptr @.str.2649, i32 signext undef)
-  %i5356 = load i64, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2960, i64 0, i32 3, i32 3), align 2, !tbaa !47
+  %i5356 = load i64, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2960, i64 0, i32 3, i32 3), align 2, !tbaa !47
   call fastcc void @transparent_crc(i64 %i5356, ptr @.str.2650, i32 signext undef)
-  %i5357 = load i32, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2960, i64 0, i32 3, i32 4), align 2, !tbaa !48
+  %i5357 = load i32, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2960, i64 0, i32 3, i32 4), align 2, !tbaa !48
   %i5358 = sext i32 %i5357 to i64
   call fastcc void @transparent_crc(i64 %i5358, ptr @.str.2651, i32 signext undef)
-  %i5359 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2960, i64 0, i32 4, i32 0), align 2
+  %i5359 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2960, i64 0, i32 4, i32 0), align 2
   %i5360 = ashr i128 %i5359, 99
   %i5361 = shl nsw i128 %i5360, 32
   %i5362 = trunc i128 %i5361 to i64
   %i5363 = ashr exact i64 %i5362, 32
   call fastcc void @transparent_crc(i64 %i5363, ptr @.str.2652, i32 signext undef)
-  %i5364 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2960, i64 0, i32 4, i32 0), align 2
+  %i5364 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2960, i64 0, i32 4, i32 0), align 2
   %i5365 = shl i128 %i5364, 29
   %i5366 = ashr i128 %i5365, 97
   %i5367 = shl nsw i128 %i5366, 32
   %i5368 = trunc i128 %i5367 to i64
   %i5369 = ashr exact i64 %i5368, 32
   call fastcc void @transparent_crc(i64 %i5369, ptr @.str.2653, i32 signext undef)
-  %i5370 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2960, i64 0, i32 4, i32 0), align 2
+  %i5370 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2960, i64 0, i32 4, i32 0), align 2
   %i5371 = shl i128 %i5370, 60
   %i5372 = ashr i128 %i5371, 108
   %i5373 = shl nsw i128 %i5372, 32
   %i5374 = trunc i128 %i5373 to i64
   %i5375 = ashr exact i64 %i5374, 32
   call fastcc void @transparent_crc(i64 %i5375, ptr @.str.2654, i32 signext undef)
-  %i5376 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2960, i64 0, i32 4, i32 0), align 2
+  %i5376 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2960, i64 0, i32 4, i32 0), align 2
   %i5377 = shl i128 %i5376, 80
   %i5378 = ashr i128 %i5377, 110
   %i5379 = shl nsw i128 %i5378, 32
   %i5380 = trunc i128 %i5379 to i64
   %i5381 = ashr exact i64 %i5380, 32
   call fastcc void @transparent_crc(i64 %i5381, ptr @.str.2655, i32 signext undef)
-  %i5382 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2960, i64 0, i32 4, i32 0), align 2
+  %i5382 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2960, i64 0, i32 4, i32 0), align 2
   %i5383 = lshr i128 %i5382, 28
   %i5384 = trunc i128 %i5383 to i64
   %i5385 = and i64 %i5384, 3
   call fastcc void @transparent_crc(i64 %i5385, ptr @.str.2656, i32 signext undef)
-  %i5386 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2960, i64 0, i32 4, i32 0), align 2
+  %i5386 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2960, i64 0, i32 4, i32 0), align 2
   %i5387 = shl i128 %i5386, 100
   %i5388 = ashr i128 %i5387, 107
   %i5389 = shl nsw i128 %i5388, 32
   %i5390 = trunc i128 %i5389 to i64
   %i5391 = ashr exact i64 %i5390, 32
   call fastcc void @transparent_crc(i64 %i5391, ptr @.str.2657, i32 signext undef)
-  %i5392 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2960, i64 0, i32 5, i32 0, i32 0), align 2
+  %i5392 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2960, i64 0, i32 5, i32 0, i32 0), align 2
   %i5393 = lshr i80 %i5392, 57
   %i5394 = trunc i80 %i5393 to i64
   call fastcc void @transparent_crc(i64 %i5394, ptr @.str.2658, i32 signext undef)
-  %i5395 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2960, i64 0, i32 5, i32 0, i32 0), align 2
+  %i5395 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2960, i64 0, i32 5, i32 0, i32 0), align 2
   %i5396 = shl i80 %i5395, 23
   %i5397 = ashr i80 %i5396, 64
   %i5398 = shl nsw i80 %i5397, 32
   %i5399 = trunc i80 %i5398 to i64
   %i5400 = ashr exact i64 %i5399, 32
   call fastcc void @transparent_crc(i64 %i5400, ptr @.str.2659, i32 signext undef)
-  %i5401 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2960, i64 0, i32 5, i32 0, i32 0), align 2
+  %i5401 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2960, i64 0, i32 5, i32 0, i32 0), align 2
   %i5402 = shl i80 %i5401, 39
   %i5403 = ashr i80 %i5402, 62
   %i5404 = shl nsw i80 %i5403, 32
   %i5405 = trunc i80 %i5404 to i64
   %i5406 = ashr exact i64 %i5405, 32
   call fastcc void @transparent_crc(i64 %i5406, ptr @.str.2660, i32 signext undef)
-  %i5407 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2960, i64 0, i32 5, i32 0, i32 0), align 2
+  %i5407 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2960, i64 0, i32 5, i32 0, i32 0), align 2
   %i5408 = shl i80 %i5407, 57
   %i5409 = ashr i80 %i5408, 58
   %i5410 = shl nsw i80 %i5409, 32
@@ -10286,864 +10286,864 @@ bb25:                                             ; preds = %bb15
   %i5412 = ashr exact i64 %i5411, 32
   call fastcc void @transparent_crc(i64 %i5412, ptr @.str.2661, i32 signext undef)
   call fastcc void @transparent_crc(i64 0, ptr @.str.2679, i32 signext undef)
-  %i5413 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2961, i64 0, i32 3, i32 2), align 2, !tbaa !46
+  %i5413 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2961, i64 0, i32 3, i32 2), align 2, !tbaa !46
   %i5414 = sext i16 %i5413 to i64
   call fastcc void @transparent_crc(i64 %i5414, ptr @.str.2680, i32 signext undef)
-  %i5415 = load i64, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2961, i64 0, i32 3, i32 3), align 2, !tbaa !47
+  %i5415 = load i64, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2961, i64 0, i32 3, i32 3), align 2, !tbaa !47
   call fastcc void @transparent_crc(i64 %i5415, ptr @.str.2681, i32 signext undef)
-  %i5416 = load i32, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2961, i64 0, i32 3, i32 4), align 2, !tbaa !48
+  %i5416 = load i32, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2961, i64 0, i32 3, i32 4), align 2, !tbaa !48
   %i5417 = sext i32 %i5416 to i64
   call fastcc void @transparent_crc(i64 %i5417, ptr @.str.2682, i32 signext undef)
-  %i5418 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2961, i64 0, i32 4, i32 0), align 2
+  %i5418 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2961, i64 0, i32 4, i32 0), align 2
   %i5419 = ashr i128 %i5418, 99
   %i5420 = shl nsw i128 %i5419, 32
   %i5421 = trunc i128 %i5420 to i64
   %i5422 = ashr exact i64 %i5421, 32
   call fastcc void @transparent_crc(i64 %i5422, ptr @.str.2683, i32 signext undef)
-  %i5423 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2961, i64 0, i32 4, i32 0), align 2
+  %i5423 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2961, i64 0, i32 4, i32 0), align 2
   %i5424 = shl i128 %i5423, 29
   %i5425 = ashr i128 %i5424, 97
   %i5426 = shl nsw i128 %i5425, 32
   %i5427 = trunc i128 %i5426 to i64
   %i5428 = ashr exact i64 %i5427, 32
   call fastcc void @transparent_crc(i64 %i5428, ptr @.str.2684, i32 signext undef)
-  %i5429 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2961, i64 0, i32 4, i32 0), align 2
+  %i5429 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2961, i64 0, i32 4, i32 0), align 2
   %i5430 = shl i128 %i5429, 60
   %i5431 = ashr i128 %i5430, 108
   %i5432 = shl nsw i128 %i5431, 32
   %i5433 = trunc i128 %i5432 to i64
   %i5434 = ashr exact i64 %i5433, 32
   call fastcc void @transparent_crc(i64 %i5434, ptr @.str.2685, i32 signext undef)
-  %i5435 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2961, i64 0, i32 4, i32 0), align 2
+  %i5435 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2961, i64 0, i32 4, i32 0), align 2
   %i5436 = shl i128 %i5435, 80
   %i5437 = ashr i128 %i5436, 110
   %i5438 = shl nsw i128 %i5437, 32
   %i5439 = trunc i128 %i5438 to i64
   %i5440 = ashr exact i64 %i5439, 32
   call fastcc void @transparent_crc(i64 %i5440, ptr @.str.2686, i32 signext undef)
-  %i5441 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2961, i64 0, i32 4, i32 0), align 2
+  %i5441 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2961, i64 0, i32 4, i32 0), align 2
   %i5442 = lshr i128 %i5441, 28
   %i5443 = trunc i128 %i5442 to i64
   %i5444 = and i64 %i5443, 3
   call fastcc void @transparent_crc(i64 %i5444, ptr @.str.2687, i32 signext undef)
-  %i5445 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2961, i64 0, i32 4, i32 0), align 2
+  %i5445 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2961, i64 0, i32 4, i32 0), align 2
   %i5446 = shl i128 %i5445, 100
   %i5447 = ashr i128 %i5446, 107
   %i5448 = shl nsw i128 %i5447, 32
   %i5449 = trunc i128 %i5448 to i64
   %i5450 = ashr exact i64 %i5449, 32
   call fastcc void @transparent_crc(i64 %i5450, ptr @.str.2688, i32 signext undef)
-  %i5451 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2961, i64 0, i32 5, i32 0, i32 0), align 2
+  %i5451 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2961, i64 0, i32 5, i32 0, i32 0), align 2
   %i5452 = lshr i80 %i5451, 57
   %i5453 = trunc i80 %i5452 to i64
   call fastcc void @transparent_crc(i64 %i5453, ptr @.str.2689, i32 signext undef)
-  %i5454 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2961, i64 0, i32 5, i32 0, i32 0), align 2
+  %i5454 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2961, i64 0, i32 5, i32 0, i32 0), align 2
   %i5455 = shl i80 %i5454, 23
   %i5456 = ashr i80 %i5455, 64
   %i5457 = shl nsw i80 %i5456, 32
   %i5458 = trunc i80 %i5457 to i64
   %i5459 = ashr exact i64 %i5458, 32
   call fastcc void @transparent_crc(i64 %i5459, ptr @.str.2690, i32 signext undef)
-  %i5460 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2961, i64 0, i32 5, i32 0, i32 0), align 2
+  %i5460 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2961, i64 0, i32 5, i32 0, i32 0), align 2
   %i5461 = shl i80 %i5460, 39
   %i5462 = ashr i80 %i5461, 62
   %i5463 = shl nsw i80 %i5462, 32
   %i5464 = trunc i80 %i5463 to i64
   %i5465 = ashr exact i64 %i5464, 32
   call fastcc void @transparent_crc(i64 %i5465, ptr @.str.2691, i32 signext undef)
-  %i5466 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2961, i64 0, i32 5, i32 0, i32 0), align 2
+  %i5466 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2961, i64 0, i32 5, i32 0, i32 0), align 2
   %i5467 = shl i80 %i5466, 57
   %i5468 = ashr i80 %i5467, 58
   %i5469 = shl nsw i80 %i5468, 32
   %i5470 = trunc i80 %i5469 to i64
   %i5471 = ashr exact i64 %i5470, 32
   call fastcc void @transparent_crc(i64 %i5471, ptr @.str.2692, i32 signext undef)
-  %i5472 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2961, i64 0, i32 5, i32 1), align 2
+  %i5472 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2961, i64 0, i32 5, i32 1), align 2
   %i5473 = lshr i80 %i5472, 49
   %i5474 = trunc i80 %i5473 to i64
   call fastcc void @transparent_crc(i64 %i5474, ptr @.str.2693, i32 signext undef)
-  %i5475 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2961, i64 0, i32 5, i32 1), align 2
+  %i5475 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2961, i64 0, i32 5, i32 1), align 2
   %i5476 = lshr i80 %i5475, 24
   %i5477 = trunc i80 %i5476 to i64
   %i5478 = and i64 %i5477, 33554431
   call fastcc void @transparent_crc(i64 %i5478, ptr @.str.2694, i32 signext undef)
-  %i5479 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2961, i64 0, i32 5, i32 1), align 2
+  %i5479 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2961, i64 0, i32 5, i32 1), align 2
   %i5480 = shl i80 %i5479, 56
   %i5481 = ashr i80 %i5480, 68
   %i5482 = shl nsw i80 %i5481, 32
   %i5483 = trunc i80 %i5482 to i64
   %i5484 = ashr exact i64 %i5483, 32
   call fastcc void @transparent_crc(i64 %i5484, ptr @.str.2695, i32 signext undef)
-  %i5485 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2961, i64 0, i32 5, i32 1), align 2
+  %i5485 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2961, i64 0, i32 5, i32 1), align 2
   %i5486 = lshr i80 %i5485, 11
   %i5487 = trunc i80 %i5486 to i64
   %i5488 = and i64 %i5487, 1
   call fastcc void @transparent_crc(i64 %i5488, ptr @.str.2696, i32 signext undef)
-  %i5489 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2961, i64 0, i32 5, i32 1), align 2
+  %i5489 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2961, i64 0, i32 5, i32 1), align 2
   %i5490 = shl i80 %i5489, 69
   %i5491 = ashr i80 %i5490, 72
   %i5492 = shl nsw i80 %i5491, 32
   %i5493 = trunc i80 %i5492 to i64
   %i5494 = ashr exact i64 %i5493, 32
   call fastcc void @transparent_crc(i64 %i5494, ptr @.str.2697, i32 signext undef)
-  %i5495 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2961, i64 0, i32 6), align 2, !tbaa !49
+  %i5495 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2961, i64 0, i32 6), align 2, !tbaa !49
   %i5496 = sext i16 %i5495 to i64
   call fastcc void @transparent_crc(i64 %i5496, ptr @.str.2698, i32 signext undef)
-  %i5497 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2961, i64 0, i32 7), align 2, !tbaa !50
+  %i5497 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2961, i64 0, i32 7), align 2, !tbaa !50
   %i5498 = zext i16 %i5497 to i64
   call fastcc void @transparent_crc(i64 %i5498, ptr @.str.2699, i32 signext undef)
-  %i5499 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2962, i64 0, i32 0), align 2, !tbaa !23
+  %i5499 = load i16, ptr @g_2962, align 2, !tbaa !23
   %i5500 = sext i16 %i5499 to i64
   call fastcc void @transparent_crc(i64 %i5500, ptr @.str.2700, i32 signext undef)
-  %i5501 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2962, i64 0, i32 1), align 2, !tbaa !51
+  %i5501 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2962, i64 0, i32 1), align 2, !tbaa !51
   %i5502 = sext i8 %i5501 to i64
   call fastcc void @transparent_crc(i64 %i5502, ptr @.str.2701, i32 signext undef)
-  %i5503 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2962, i64 0, i32 2, i32 0), align 1
+  %i5503 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2962, i64 0, i32 2, i32 0), align 1
   %i5504 = lshr i120 %i5503, 107
   %i5505 = trunc i120 %i5504 to i64
   call fastcc void @transparent_crc(i64 %i5505, ptr @.str.2702, i32 signext undef)
-  %i5506 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2962, i64 0, i32 2, i32 0), align 1
+  %i5506 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2962, i64 0, i32 2, i32 0), align 1
   %i5507 = lshr i120 %i5506, 78
   %i5508 = trunc i120 %i5507 to i64
   %i5509 = and i64 %i5508, 536870911
   call fastcc void @transparent_crc(i64 %i5509, ptr @.str.2703, i32 signext undef)
-  %i5510 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2962, i64 0, i32 2, i32 0), align 1
+  %i5510 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2962, i64 0, i32 2, i32 0), align 1
   %i5511 = shl i120 %i5510, 42
   %i5512 = ashr i120 %i5511, 104
   %i5513 = shl nsw i120 %i5512, 32
   %i5514 = trunc i120 %i5513 to i64
   %i5515 = ashr exact i64 %i5514, 32
   call fastcc void @transparent_crc(i64 %i5515, ptr @.str.2704, i32 signext undef)
-  %i5516 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2962, i64 0, i32 2, i32 0), align 1
+  %i5516 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2962, i64 0, i32 2, i32 0), align 1
   %i5517 = shl i120 %i5516, 58
   %i5518 = ashr i120 %i5517, 105
   %i5519 = shl nsw i120 %i5518, 32
   %i5520 = trunc i120 %i5519 to i64
   %i5521 = ashr exact i64 %i5520, 32
   call fastcc void @transparent_crc(i64 %i5521, ptr @.str.2705, i32 signext undef)
-  %i5522 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2962, i64 0, i32 2, i32 0), align 1
+  %i5522 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2962, i64 0, i32 2, i32 0), align 1
   %i5523 = lshr i120 %i5522, 41
   %i5524 = trunc i120 %i5523 to i64
   %i5525 = and i64 %i5524, 63
   call fastcc void @transparent_crc(i64 %i5525, ptr @.str.2706, i32 signext undef)
-  %i5526 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2962, i64 0, i32 2, i32 0), align 1
+  %i5526 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2962, i64 0, i32 2, i32 0), align 1
   %i5527 = lshr i120 %i5526, 19
   %i5528 = trunc i120 %i5527 to i64
   %i5529 = and i64 %i5528, 4194303
   call fastcc void @transparent_crc(i64 %i5529, ptr @.str.2707, i32 signext undef)
-  %i5530 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2962, i64 0, i32 2, i32 0), align 1
+  %i5530 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2962, i64 0, i32 2, i32 0), align 1
   %i5531 = shl i120 %i5530, 101
   %i5532 = ashr exact i120 %i5531, 69
   %i5533 = trunc i120 %i5532 to i64
   %i5534 = ashr exact i64 %i5533, 32
   call fastcc void @transparent_crc(i64 %i5534, ptr @.str.2708, i32 signext undef)
-  %i5535 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2962, i64 0, i32 3, i32 0), align 2, !tbaa !44
+  %i5535 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2962, i64 0, i32 3, i32 0), align 2, !tbaa !44
   %i5536 = zext i8 %i5535 to i64
   call fastcc void @transparent_crc(i64 %i5536, ptr @.str.2709, i32 signext undef)
-  %i5537 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2962, i64 0, i32 3, i32 1), align 1, !tbaa !45
+  %i5537 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2962, i64 0, i32 3, i32 1), align 1, !tbaa !45
   %i5538 = sext i8 %i5537 to i64
   call fastcc void @transparent_crc(i64 %i5538, ptr @.str.2710, i32 signext undef)
-  %i5539 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2962, i64 0, i32 3, i32 2), align 2, !tbaa !46
+  %i5539 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2962, i64 0, i32 3, i32 2), align 2, !tbaa !46
   %i5540 = sext i16 %i5539 to i64
   call fastcc void @transparent_crc(i64 %i5540, ptr @.str.2711, i32 signext undef)
-  %i5541 = load i64, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2962, i64 0, i32 3, i32 3), align 2, !tbaa !47
+  %i5541 = load i64, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2962, i64 0, i32 3, i32 3), align 2, !tbaa !47
   call fastcc void @transparent_crc(i64 %i5541, ptr @.str.2712, i32 signext undef)
-  %i5542 = load i32, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2962, i64 0, i32 3, i32 4), align 2, !tbaa !48
+  %i5542 = load i32, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2962, i64 0, i32 3, i32 4), align 2, !tbaa !48
   %i5543 = sext i32 %i5542 to i64
   call fastcc void @transparent_crc(i64 %i5543, ptr @.str.2713, i32 signext undef)
-  %i5544 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2962, i64 0, i32 4, i32 0), align 2
+  %i5544 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2962, i64 0, i32 4, i32 0), align 2
   %i5545 = ashr i128 %i5544, 99
   %i5546 = shl nsw i128 %i5545, 32
   %i5547 = trunc i128 %i5546 to i64
   %i5548 = ashr exact i64 %i5547, 32
   call fastcc void @transparent_crc(i64 %i5548, ptr @.str.2714, i32 signext undef)
-  %i5549 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2962, i64 0, i32 4, i32 0), align 2
+  %i5549 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2962, i64 0, i32 4, i32 0), align 2
   %i5550 = shl i128 %i5549, 29
   %i5551 = ashr i128 %i5550, 97
   %i5552 = shl nsw i128 %i5551, 32
   %i5553 = trunc i128 %i5552 to i64
   %i5554 = ashr exact i64 %i5553, 32
   call fastcc void @transparent_crc(i64 %i5554, ptr @.str.2715, i32 signext undef)
-  %i5555 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2962, i64 0, i32 4, i32 0), align 2
+  %i5555 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2962, i64 0, i32 4, i32 0), align 2
   %i5556 = shl i128 %i5555, 60
   %i5557 = ashr i128 %i5556, 108
   %i5558 = shl nsw i128 %i5557, 32
   %i5559 = trunc i128 %i5558 to i64
   %i5560 = ashr exact i64 %i5559, 32
   call fastcc void @transparent_crc(i64 %i5560, ptr @.str.2716, i32 signext undef)
-  %i5561 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2962, i64 0, i32 4, i32 0), align 2
+  %i5561 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2962, i64 0, i32 4, i32 0), align 2
   %i5562 = shl i128 %i5561, 80
   %i5563 = ashr i128 %i5562, 110
   %i5564 = shl nsw i128 %i5563, 32
   %i5565 = trunc i128 %i5564 to i64
   %i5566 = ashr exact i64 %i5565, 32
   call fastcc void @transparent_crc(i64 %i5566, ptr @.str.2717, i32 signext undef)
-  %i5567 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2962, i64 0, i32 4, i32 0), align 2
+  %i5567 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2962, i64 0, i32 4, i32 0), align 2
   %i5568 = lshr i128 %i5567, 28
   %i5569 = trunc i128 %i5568 to i64
   %i5570 = and i64 %i5569, 3
   call fastcc void @transparent_crc(i64 %i5570, ptr @.str.2718, i32 signext undef)
-  %i5571 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2962, i64 0, i32 4, i32 0), align 2
+  %i5571 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2962, i64 0, i32 4, i32 0), align 2
   %i5572 = shl i128 %i5571, 100
   %i5573 = ashr i128 %i5572, 107
   %i5574 = shl nsw i128 %i5573, 32
   %i5575 = trunc i128 %i5574 to i64
   %i5576 = ashr exact i64 %i5575, 32
   call fastcc void @transparent_crc(i64 %i5576, ptr @.str.2719, i32 signext undef)
-  %i5577 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2962, i64 0, i32 5, i32 0, i32 0), align 2
+  %i5577 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2962, i64 0, i32 5, i32 0, i32 0), align 2
   %i5578 = lshr i80 %i5577, 57
   %i5579 = trunc i80 %i5578 to i64
   call fastcc void @transparent_crc(i64 %i5579, ptr @.str.2720, i32 signext undef)
-  %i5580 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2962, i64 0, i32 5, i32 0, i32 0), align 2
+  %i5580 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2962, i64 0, i32 5, i32 0, i32 0), align 2
   %i5581 = shl i80 %i5580, 23
   %i5582 = ashr i80 %i5581, 64
   %i5583 = shl nsw i80 %i5582, 32
   %i5584 = trunc i80 %i5583 to i64
   %i5585 = ashr exact i64 %i5584, 32
   call fastcc void @transparent_crc(i64 %i5585, ptr @.str.2721, i32 signext undef)
-  %i5586 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2962, i64 0, i32 5, i32 0, i32 0), align 2
+  %i5586 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2962, i64 0, i32 5, i32 0, i32 0), align 2
   %i5587 = shl i80 %i5586, 39
   %i5588 = ashr i80 %i5587, 62
   %i5589 = shl nsw i80 %i5588, 32
   %i5590 = trunc i80 %i5589 to i64
   %i5591 = ashr exact i64 %i5590, 32
   call fastcc void @transparent_crc(i64 %i5591, ptr @.str.2722, i32 signext undef)
-  %i5592 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2962, i64 0, i32 5, i32 0, i32 0), align 2
+  %i5592 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2962, i64 0, i32 5, i32 0, i32 0), align 2
   %i5593 = shl i80 %i5592, 57
   %i5594 = ashr i80 %i5593, 58
   %i5595 = shl nsw i80 %i5594, 32
   %i5596 = trunc i80 %i5595 to i64
   %i5597 = ashr exact i64 %i5596, 32
   call fastcc void @transparent_crc(i64 %i5597, ptr @.str.2723, i32 signext undef)
-  %i5598 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2962, i64 0, i32 5, i32 1), align 2
+  %i5598 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2962, i64 0, i32 5, i32 1), align 2
   %i5599 = lshr i80 %i5598, 49
   %i5600 = trunc i80 %i5599 to i64
   call fastcc void @transparent_crc(i64 %i5600, ptr @.str.2724, i32 signext undef)
-  %i5601 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2962, i64 0, i32 5, i32 1), align 2
+  %i5601 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2962, i64 0, i32 5, i32 1), align 2
   %i5602 = lshr i80 %i5601, 24
   %i5603 = trunc i80 %i5602 to i64
   %i5604 = and i64 %i5603, 33554431
   call fastcc void @transparent_crc(i64 %i5604, ptr @.str.2725, i32 signext undef)
-  %i5605 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2962, i64 0, i32 5, i32 1), align 2
+  %i5605 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2962, i64 0, i32 5, i32 1), align 2
   %i5606 = shl i80 %i5605, 56
   %i5607 = ashr i80 %i5606, 68
   %i5608 = shl nsw i80 %i5607, 32
   %i5609 = trunc i80 %i5608 to i64
   %i5610 = ashr exact i64 %i5609, 32
   call fastcc void @transparent_crc(i64 %i5610, ptr @.str.2726, i32 signext undef)
-  %i5611 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2962, i64 0, i32 5, i32 1), align 2
+  %i5611 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2962, i64 0, i32 5, i32 1), align 2
   %i5612 = lshr i80 %i5611, 11
   %i5613 = trunc i80 %i5612 to i64
   %i5614 = and i64 %i5613, 1
   call fastcc void @transparent_crc(i64 %i5614, ptr @.str.2727, i32 signext undef)
-  %i5615 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2962, i64 0, i32 5, i32 1), align 2
+  %i5615 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2962, i64 0, i32 5, i32 1), align 2
   %i5616 = shl i80 %i5615, 69
   %i5617 = ashr i80 %i5616, 72
   %i5618 = shl nsw i80 %i5617, 32
   %i5619 = trunc i80 %i5618 to i64
   %i5620 = ashr exact i64 %i5619, 32
   call fastcc void @transparent_crc(i64 %i5620, ptr @.str.2728, i32 signext undef)
-  %i5621 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2965, i64 0, i32 2, i32 0), align 1
+  %i5621 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2965, i64 0, i32 2, i32 0), align 1
   %i5622 = shl i120 %i5621, 58
   %i5623 = ashr i120 %i5622, 105
   %i5624 = shl nsw i120 %i5623, 32
   %i5625 = trunc i120 %i5624 to i64
   %i5626 = ashr exact i64 %i5625, 32
   call fastcc void @transparent_crc(i64 %i5626, ptr @.str.2798, i32 signext undef)
-  %i5627 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2965, i64 0, i32 2, i32 0), align 1
+  %i5627 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2965, i64 0, i32 2, i32 0), align 1
   %i5628 = lshr i120 %i5627, 41
   %i5629 = trunc i120 %i5628 to i64
   %i5630 = and i64 %i5629, 63
   call fastcc void @transparent_crc(i64 %i5630, ptr @.str.2799, i32 signext undef)
-  %i5631 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2965, i64 0, i32 2, i32 0), align 1
+  %i5631 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2965, i64 0, i32 2, i32 0), align 1
   %i5632 = lshr i120 %i5631, 19
   %i5633 = trunc i120 %i5632 to i64
   %i5634 = and i64 %i5633, 4194303
   call fastcc void @transparent_crc(i64 %i5634, ptr @.str.2800, i32 signext undef)
-  %i5635 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2965, i64 0, i32 2, i32 0), align 1
+  %i5635 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2965, i64 0, i32 2, i32 0), align 1
   %i5636 = shl i120 %i5635, 101
   %i5637 = ashr exact i120 %i5636, 69
   %i5638 = trunc i120 %i5637 to i64
   %i5639 = ashr exact i64 %i5638, 32
   call fastcc void @transparent_crc(i64 %i5639, ptr @.str.2801, i32 signext undef)
-  %i5640 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2965, i64 0, i32 3, i32 0), align 2, !tbaa !44
+  %i5640 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2965, i64 0, i32 3, i32 0), align 2, !tbaa !44
   %i5641 = zext i8 %i5640 to i64
   call fastcc void @transparent_crc(i64 %i5641, ptr @.str.2802, i32 signext undef)
-  %i5642 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2965, i64 0, i32 3, i32 1), align 1, !tbaa !45
+  %i5642 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2965, i64 0, i32 3, i32 1), align 1, !tbaa !45
   %i5643 = sext i8 %i5642 to i64
   call fastcc void @transparent_crc(i64 %i5643, ptr @.str.2803, i32 signext undef)
-  %i5644 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2965, i64 0, i32 3, i32 2), align 2, !tbaa !46
+  %i5644 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2965, i64 0, i32 3, i32 2), align 2, !tbaa !46
   %i5645 = sext i16 %i5644 to i64
   call fastcc void @transparent_crc(i64 %i5645, ptr @.str.2804, i32 signext undef)
-  %i5646 = load i64, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2965, i64 0, i32 3, i32 3), align 2, !tbaa !47
+  %i5646 = load i64, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2965, i64 0, i32 3, i32 3), align 2, !tbaa !47
   call fastcc void @transparent_crc(i64 %i5646, ptr @.str.2805, i32 signext undef)
-  %i5647 = load i32, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2965, i64 0, i32 3, i32 4), align 2, !tbaa !48
+  %i5647 = load i32, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2965, i64 0, i32 3, i32 4), align 2, !tbaa !48
   %i5648 = sext i32 %i5647 to i64
   call fastcc void @transparent_crc(i64 %i5648, ptr @.str.2806, i32 signext undef)
-  %i5649 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2965, i64 0, i32 4, i32 0), align 2
+  %i5649 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2965, i64 0, i32 4, i32 0), align 2
   %i5650 = ashr i128 %i5649, 99
   %i5651 = shl nsw i128 %i5650, 32
   %i5652 = trunc i128 %i5651 to i64
   %i5653 = ashr exact i64 %i5652, 32
   call fastcc void @transparent_crc(i64 %i5653, ptr @.str.2807, i32 signext undef)
-  %i5654 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2965, i64 0, i32 4, i32 0), align 2
+  %i5654 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2965, i64 0, i32 4, i32 0), align 2
   %i5655 = shl i128 %i5654, 29
   %i5656 = ashr i128 %i5655, 97
   %i5657 = shl nsw i128 %i5656, 32
   %i5658 = trunc i128 %i5657 to i64
   %i5659 = ashr exact i64 %i5658, 32
   call fastcc void @transparent_crc(i64 %i5659, ptr @.str.2808, i32 signext undef)
-  %i5660 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2965, i64 0, i32 4, i32 0), align 2
+  %i5660 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2965, i64 0, i32 4, i32 0), align 2
   call fastcc void @transparent_crc(i64 0, ptr @.str.2814, i32 signext undef)
-  %i5661 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2965, i64 0, i32 5, i32 0, i32 0), align 2
+  %i5661 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2965, i64 0, i32 5, i32 0, i32 0), align 2
   call fastcc void @transparent_crc(i64 undef, ptr @.str.2841, i32 signext undef)
-  %i5662 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2966, i64 0, i32 4, i32 0), align 2
+  %i5662 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2966, i64 0, i32 4, i32 0), align 2
   call fastcc void @transparent_crc(i64 0, ptr @.str.2936, i32 signext undef)
-  %i5663 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2969, i64 0, i32 5, i32 0, i32 0), align 2
+  %i5663 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2969, i64 0, i32 5, i32 0, i32 0), align 2
   %i5664 = lshr i80 %i5663, 57
   %i5665 = trunc i80 %i5664 to i64
   call fastcc void @transparent_crc(i64 %i5665, ptr @.str.2937, i32 signext undef)
-  %i5666 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2969, i64 0, i32 5, i32 0, i32 0), align 2
+  %i5666 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2969, i64 0, i32 5, i32 0, i32 0), align 2
   %i5667 = shl i80 %i5666, 23
   %i5668 = ashr i80 %i5667, 64
   %i5669 = shl nsw i80 %i5668, 32
   %i5670 = trunc i80 %i5669 to i64
   %i5671 = ashr exact i64 %i5670, 32
   call fastcc void @transparent_crc(i64 %i5671, ptr @.str.2938, i32 signext undef)
-  %i5672 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2969, i64 0, i32 5, i32 0, i32 0), align 2
+  %i5672 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2969, i64 0, i32 5, i32 0, i32 0), align 2
   %i5673 = shl i80 %i5672, 39
   %i5674 = ashr i80 %i5673, 62
   %i5675 = shl nsw i80 %i5674, 32
   %i5676 = trunc i80 %i5675 to i64
   %i5677 = ashr exact i64 %i5676, 32
   call fastcc void @transparent_crc(i64 %i5677, ptr @.str.2939, i32 signext undef)
-  %i5678 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2969, i64 0, i32 5, i32 0, i32 0), align 2
+  %i5678 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2969, i64 0, i32 5, i32 0, i32 0), align 2
   %i5679 = shl i80 %i5678, 57
   %i5680 = ashr i80 %i5679, 58
   %i5681 = shl nsw i80 %i5680, 32
   %i5682 = trunc i80 %i5681 to i64
   %i5683 = ashr exact i64 %i5682, 32
   call fastcc void @transparent_crc(i64 %i5683, ptr @.str.2940, i32 signext undef)
-  %i5684 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2969, i64 0, i32 5, i32 1), align 2
+  %i5684 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2969, i64 0, i32 5, i32 1), align 2
   %i5685 = lshr i80 %i5684, 49
   %i5686 = trunc i80 %i5685 to i64
   call fastcc void @transparent_crc(i64 %i5686, ptr @.str.2941, i32 signext undef)
-  %i5687 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2969, i64 0, i32 5, i32 1), align 2
+  %i5687 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2969, i64 0, i32 5, i32 1), align 2
   %i5688 = lshr i80 %i5687, 24
   %i5689 = trunc i80 %i5688 to i64
   %i5690 = and i64 %i5689, 33554431
   call fastcc void @transparent_crc(i64 %i5690, ptr @.str.2942, i32 signext undef)
-  %i5691 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2969, i64 0, i32 5, i32 1), align 2
+  %i5691 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2969, i64 0, i32 5, i32 1), align 2
   %i5692 = shl i80 %i5691, 56
   %i5693 = ashr i80 %i5692, 68
   %i5694 = shl nsw i80 %i5693, 32
   %i5695 = trunc i80 %i5694 to i64
   %i5696 = ashr exact i64 %i5695, 32
   call fastcc void @transparent_crc(i64 %i5696, ptr @.str.2943, i32 signext undef)
-  %i5697 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2969, i64 0, i32 5, i32 1), align 2
+  %i5697 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2969, i64 0, i32 5, i32 1), align 2
   %i5698 = lshr i80 %i5697, 11
   %i5699 = trunc i80 %i5698 to i64
   %i5700 = and i64 %i5699, 1
   call fastcc void @transparent_crc(i64 %i5700, ptr @.str.2944, i32 signext undef)
-  %i5701 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2969, i64 0, i32 5, i32 1), align 2
+  %i5701 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2969, i64 0, i32 5, i32 1), align 2
   %i5702 = shl i80 %i5701, 69
   %i5703 = ashr i80 %i5702, 72
   %i5704 = shl nsw i80 %i5703, 32
   %i5705 = trunc i80 %i5704 to i64
   %i5706 = ashr exact i64 %i5705, 32
   call fastcc void @transparent_crc(i64 %i5706, ptr @.str.2945, i32 signext undef)
-  %i5707 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2969, i64 0, i32 6), align 2, !tbaa !49
+  %i5707 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2969, i64 0, i32 6), align 2, !tbaa !49
   %i5708 = sext i16 %i5707 to i64
   call fastcc void @transparent_crc(i64 %i5708, ptr @.str.2946, i32 signext undef)
-  %i5709 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2969, i64 0, i32 7), align 2, !tbaa !50
+  %i5709 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2969, i64 0, i32 7), align 2, !tbaa !50
   %i5710 = zext i16 %i5709 to i64
   call fastcc void @transparent_crc(i64 %i5710, ptr @.str.2947, i32 signext undef)
-  %i5711 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2970, i64 0, i32 0), align 2, !tbaa !23
+  %i5711 = load i16, ptr @g_2970, align 2, !tbaa !23
   %i5712 = sext i16 %i5711 to i64
   call fastcc void @transparent_crc(i64 %i5712, ptr @.str.2948, i32 signext undef)
-  %i5713 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2970, i64 0, i32 1), align 2, !tbaa !51
+  %i5713 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2970, i64 0, i32 1), align 2, !tbaa !51
   %i5714 = sext i8 %i5713 to i64
   call fastcc void @transparent_crc(i64 %i5714, ptr @.str.2949, i32 signext undef)
-  %i5715 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2970, i64 0, i32 2, i32 0), align 1
+  %i5715 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2970, i64 0, i32 2, i32 0), align 1
   %i5716 = lshr i120 %i5715, 107
   %i5717 = trunc i120 %i5716 to i64
   call fastcc void @transparent_crc(i64 %i5717, ptr @.str.2950, i32 signext undef)
-  %i5718 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2970, i64 0, i32 2, i32 0), align 1
+  %i5718 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2970, i64 0, i32 2, i32 0), align 1
   %i5719 = lshr i120 %i5718, 78
   %i5720 = trunc i120 %i5719 to i64
   %i5721 = and i64 %i5720, 536870911
   call fastcc void @transparent_crc(i64 %i5721, ptr @.str.2951, i32 signext undef)
-  %i5722 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2970, i64 0, i32 2, i32 0), align 1
+  %i5722 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2970, i64 0, i32 2, i32 0), align 1
   %i5723 = shl i120 %i5722, 42
   %i5724 = ashr i120 %i5723, 104
   %i5725 = shl nsw i120 %i5724, 32
   %i5726 = trunc i120 %i5725 to i64
   %i5727 = ashr exact i64 %i5726, 32
   call fastcc void @transparent_crc(i64 %i5727, ptr @.str.2952, i32 signext undef)
-  %i5728 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2970, i64 0, i32 2, i32 0), align 1
+  %i5728 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2970, i64 0, i32 2, i32 0), align 1
   %i5729 = shl i120 %i5728, 58
   %i5730 = ashr i120 %i5729, 105
   %i5731 = shl nsw i120 %i5730, 32
   %i5732 = trunc i120 %i5731 to i64
   %i5733 = ashr exact i64 %i5732, 32
   call fastcc void @transparent_crc(i64 %i5733, ptr @.str.2953, i32 signext undef)
-  %i5734 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2970, i64 0, i32 2, i32 0), align 1
+  %i5734 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2970, i64 0, i32 2, i32 0), align 1
   %i5735 = lshr i120 %i5734, 41
   %i5736 = trunc i120 %i5735 to i64
   %i5737 = and i64 %i5736, 63
   call fastcc void @transparent_crc(i64 %i5737, ptr @.str.2954, i32 signext undef)
-  %i5738 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2970, i64 0, i32 2, i32 0), align 1
+  %i5738 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2970, i64 0, i32 2, i32 0), align 1
   %i5739 = lshr i120 %i5738, 19
   %i5740 = trunc i120 %i5739 to i64
   %i5741 = and i64 %i5740, 4194303
   call fastcc void @transparent_crc(i64 %i5741, ptr @.str.2955, i32 signext undef)
-  %i5742 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2970, i64 0, i32 2, i32 0), align 1
+  %i5742 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2970, i64 0, i32 2, i32 0), align 1
   %i5743 = shl i120 %i5742, 101
   %i5744 = ashr exact i120 %i5743, 69
   %i5745 = trunc i120 %i5744 to i64
   %i5746 = ashr exact i64 %i5745, 32
   call fastcc void @transparent_crc(i64 %i5746, ptr @.str.2956, i32 signext undef)
-  %i5747 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2970, i64 0, i32 3, i32 0), align 2, !tbaa !44
+  %i5747 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2970, i64 0, i32 3, i32 0), align 2, !tbaa !44
   %i5748 = zext i8 %i5747 to i64
   call fastcc void @transparent_crc(i64 %i5748, ptr @.str.2957, i32 signext undef)
-  %i5749 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2970, i64 0, i32 3, i32 1), align 1, !tbaa !45
+  %i5749 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2970, i64 0, i32 3, i32 1), align 1, !tbaa !45
   %i5750 = sext i8 %i5749 to i64
   call fastcc void @transparent_crc(i64 %i5750, ptr @.str.2958, i32 signext undef)
-  %i5751 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2970, i64 0, i32 3, i32 2), align 2, !tbaa !46
+  %i5751 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2970, i64 0, i32 3, i32 2), align 2, !tbaa !46
   %i5752 = sext i16 %i5751 to i64
   call fastcc void @transparent_crc(i64 %i5752, ptr @.str.2959, i32 signext undef)
-  %i5753 = load i64, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2970, i64 0, i32 3, i32 3), align 2, !tbaa !47
+  %i5753 = load i64, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2970, i64 0, i32 3, i32 3), align 2, !tbaa !47
   call fastcc void @transparent_crc(i64 %i5753, ptr @.str.2960, i32 signext undef)
-  %i5754 = load i32, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2970, i64 0, i32 3, i32 4), align 2, !tbaa !48
+  %i5754 = load i32, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2970, i64 0, i32 3, i32 4), align 2, !tbaa !48
   %i5755 = sext i32 %i5754 to i64
   call fastcc void @transparent_crc(i64 %i5755, ptr @.str.2961, i32 signext undef)
-  %i5756 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2970, i64 0, i32 4, i32 0), align 2
+  %i5756 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2970, i64 0, i32 4, i32 0), align 2
   %i5757 = ashr i128 %i5756, 99
   %i5758 = shl nsw i128 %i5757, 32
   %i5759 = trunc i128 %i5758 to i64
   %i5760 = ashr exact i64 %i5759, 32
   call fastcc void @transparent_crc(i64 %i5760, ptr @.str.2962, i32 signext undef)
-  %i5761 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2970, i64 0, i32 4, i32 0), align 2
+  %i5761 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2970, i64 0, i32 4, i32 0), align 2
   %i5762 = shl i128 %i5761, 29
   %i5763 = ashr i128 %i5762, 97
   %i5764 = shl nsw i128 %i5763, 32
   %i5765 = trunc i128 %i5764 to i64
   %i5766 = ashr exact i64 %i5765, 32
   call fastcc void @transparent_crc(i64 %i5766, ptr @.str.2963, i32 signext undef)
-  %i5767 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2970, i64 0, i32 4, i32 0), align 2
+  %i5767 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2970, i64 0, i32 4, i32 0), align 2
   %i5768 = shl i128 %i5767, 60
   %i5769 = ashr i128 %i5768, 108
   %i5770 = shl nsw i128 %i5769, 32
   %i5771 = trunc i128 %i5770 to i64
   %i5772 = ashr exact i64 %i5771, 32
   call fastcc void @transparent_crc(i64 %i5772, ptr @.str.2964, i32 signext undef)
-  %i5773 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2970, i64 0, i32 4, i32 0), align 2
+  %i5773 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2970, i64 0, i32 4, i32 0), align 2
   %i5774 = shl i128 %i5773, 80
   %i5775 = ashr i128 %i5774, 110
   %i5776 = shl nsw i128 %i5775, 32
   %i5777 = trunc i128 %i5776 to i64
   %i5778 = ashr exact i64 %i5777, 32
   call fastcc void @transparent_crc(i64 %i5778, ptr @.str.2965, i32 signext undef)
-  %i5779 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2970, i64 0, i32 4, i32 0), align 2
+  %i5779 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2970, i64 0, i32 4, i32 0), align 2
   %i5780 = lshr i128 %i5779, 28
   %i5781 = trunc i128 %i5780 to i64
   %i5782 = and i64 %i5781, 3
   call fastcc void @transparent_crc(i64 %i5782, ptr @.str.2966, i32 signext undef)
-  %i5783 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2970, i64 0, i32 4, i32 0), align 2
+  %i5783 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2970, i64 0, i32 4, i32 0), align 2
   %i5784 = shl i128 %i5783, 100
   %i5785 = ashr i128 %i5784, 107
   %i5786 = shl nsw i128 %i5785, 32
   %i5787 = trunc i128 %i5786 to i64
   %i5788 = ashr exact i64 %i5787, 32
   call fastcc void @transparent_crc(i64 %i5788, ptr @.str.2967, i32 signext undef)
-  %i5789 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2970, i64 0, i32 5, i32 0, i32 0), align 2
+  %i5789 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2970, i64 0, i32 5, i32 0, i32 0), align 2
   %i5790 = lshr i80 %i5789, 57
   %i5791 = trunc i80 %i5790 to i64
   call fastcc void @transparent_crc(i64 %i5791, ptr @.str.2968, i32 signext undef)
-  %i5792 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2970, i64 0, i32 5, i32 0, i32 0), align 2
+  %i5792 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2970, i64 0, i32 5, i32 0, i32 0), align 2
   %i5793 = shl i80 %i5792, 23
   %i5794 = ashr i80 %i5793, 64
   %i5795 = shl nsw i80 %i5794, 32
   %i5796 = trunc i80 %i5795 to i64
   %i5797 = ashr exact i64 %i5796, 32
   call fastcc void @transparent_crc(i64 %i5797, ptr @.str.2969, i32 signext undef)
-  %i5798 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2970, i64 0, i32 5, i32 0, i32 0), align 2
+  %i5798 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2970, i64 0, i32 5, i32 0, i32 0), align 2
   %i5799 = shl i80 %i5798, 39
   %i5800 = ashr i80 %i5799, 62
   %i5801 = shl nsw i80 %i5800, 32
   %i5802 = trunc i80 %i5801 to i64
   %i5803 = ashr exact i64 %i5802, 32
   call fastcc void @transparent_crc(i64 %i5803, ptr @.str.2970, i32 signext undef)
-  %i5804 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2970, i64 0, i32 5, i32 0, i32 0), align 2
+  %i5804 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2970, i64 0, i32 5, i32 0, i32 0), align 2
   %i5805 = shl i80 %i5804, 57
   %i5806 = ashr i80 %i5805, 58
   %i5807 = shl nsw i80 %i5806, 32
   %i5808 = trunc i80 %i5807 to i64
   %i5809 = ashr exact i64 %i5808, 32
   call fastcc void @transparent_crc(i64 %i5809, ptr @.str.2971, i32 signext undef)
-  %i5810 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2970, i64 0, i32 5, i32 1), align 2
+  %i5810 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2970, i64 0, i32 5, i32 1), align 2
   %i5811 = lshr i80 %i5810, 49
   %i5812 = trunc i80 %i5811 to i64
   call fastcc void @transparent_crc(i64 %i5812, ptr @.str.2972, i32 signext undef)
-  %i5813 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2970, i64 0, i32 5, i32 1), align 2
+  %i5813 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2970, i64 0, i32 5, i32 1), align 2
   %i5814 = lshr i80 %i5813, 24
   %i5815 = trunc i80 %i5814 to i64
   %i5816 = and i64 %i5815, 33554431
   call fastcc void @transparent_crc(i64 %i5816, ptr @.str.2973, i32 signext undef)
-  %i5817 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2970, i64 0, i32 5, i32 1), align 2
+  %i5817 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2970, i64 0, i32 5, i32 1), align 2
   %i5818 = shl i80 %i5817, 56
   %i5819 = ashr i80 %i5818, 68
   %i5820 = shl nsw i80 %i5819, 32
   %i5821 = trunc i80 %i5820 to i64
   %i5822 = ashr exact i64 %i5821, 32
   call fastcc void @transparent_crc(i64 %i5822, ptr @.str.2974, i32 signext undef)
-  %i5823 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2970, i64 0, i32 5, i32 1), align 2
+  %i5823 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2970, i64 0, i32 5, i32 1), align 2
   %i5824 = lshr i80 %i5823, 11
   %i5825 = trunc i80 %i5824 to i64
   %i5826 = and i64 %i5825, 1
   call fastcc void @transparent_crc(i64 %i5826, ptr @.str.2975, i32 signext undef)
-  %i5827 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2970, i64 0, i32 5, i32 1), align 2
+  %i5827 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2970, i64 0, i32 5, i32 1), align 2
   %i5828 = shl i80 %i5827, 69
   %i5829 = ashr i80 %i5828, 72
   %i5830 = shl nsw i80 %i5829, 32
   %i5831 = trunc i80 %i5830 to i64
   %i5832 = ashr exact i64 %i5831, 32
   call fastcc void @transparent_crc(i64 %i5832, ptr @.str.2976, i32 signext undef)
-  %i5833 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2970, i64 0, i32 6), align 2, !tbaa !49
+  %i5833 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2970, i64 0, i32 6), align 2, !tbaa !49
   %i5834 = sext i16 %i5833 to i64
   call fastcc void @transparent_crc(i64 %i5834, ptr @.str.2977, i32 signext undef)
-  %i5835 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2970, i64 0, i32 7), align 2, !tbaa !50
+  %i5835 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2970, i64 0, i32 7), align 2, !tbaa !50
   %i5836 = zext i16 %i5835 to i64
   call fastcc void @transparent_crc(i64 %i5836, ptr @.str.2978, i32 signext undef)
-  %i5837 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2971, i64 0, i32 0), align 2, !tbaa !23
+  %i5837 = load i16, ptr @g_2971, align 2, !tbaa !23
   %i5838 = sext i16 %i5837 to i64
   call fastcc void @transparent_crc(i64 %i5838, ptr @.str.2979, i32 signext undef)
-  %i5839 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2971, i64 0, i32 1), align 2, !tbaa !51
+  %i5839 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2971, i64 0, i32 1), align 2, !tbaa !51
   %i5840 = sext i8 %i5839 to i64
   call fastcc void @transparent_crc(i64 %i5840, ptr @.str.2980, i32 signext undef)
-  %i5841 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2971, i64 0, i32 2, i32 0), align 1
+  %i5841 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2971, i64 0, i32 2, i32 0), align 1
   %i5842 = lshr i120 %i5841, 107
   %i5843 = trunc i120 %i5842 to i64
   call fastcc void @transparent_crc(i64 %i5843, ptr @.str.2981, i32 signext undef)
-  %i5844 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2971, i64 0, i32 2, i32 0), align 1
+  %i5844 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2971, i64 0, i32 2, i32 0), align 1
   %i5845 = lshr i120 %i5844, 78
   %i5846 = trunc i120 %i5845 to i64
   %i5847 = and i64 %i5846, 536870911
   call fastcc void @transparent_crc(i64 %i5847, ptr @.str.2982, i32 signext undef)
-  %i5848 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2971, i64 0, i32 2, i32 0), align 1
+  %i5848 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2971, i64 0, i32 2, i32 0), align 1
   %i5849 = shl i120 %i5848, 42
   %i5850 = ashr i120 %i5849, 104
   %i5851 = shl nsw i120 %i5850, 32
   %i5852 = trunc i120 %i5851 to i64
   %i5853 = ashr exact i64 %i5852, 32
   call fastcc void @transparent_crc(i64 %i5853, ptr @.str.2983, i32 signext undef)
-  %i5854 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2971, i64 0, i32 2, i32 0), align 1
+  %i5854 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2971, i64 0, i32 2, i32 0), align 1
   %i5855 = shl i120 %i5854, 58
   %i5856 = ashr i120 %i5855, 105
   %i5857 = shl nsw i120 %i5856, 32
   %i5858 = trunc i120 %i5857 to i64
   %i5859 = ashr exact i64 %i5858, 32
   call fastcc void @transparent_crc(i64 %i5859, ptr @.str.2984, i32 signext undef)
-  %i5860 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2971, i64 0, i32 2, i32 0), align 1
+  %i5860 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2971, i64 0, i32 2, i32 0), align 1
   %i5861 = lshr i120 %i5860, 41
   %i5862 = trunc i120 %i5861 to i64
   %i5863 = and i64 %i5862, 63
   call fastcc void @transparent_crc(i64 %i5863, ptr @.str.2985, i32 signext undef)
-  %i5864 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2971, i64 0, i32 2, i32 0), align 1
+  %i5864 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2971, i64 0, i32 2, i32 0), align 1
   %i5865 = lshr i120 %i5864, 19
   %i5866 = trunc i120 %i5865 to i64
   %i5867 = and i64 %i5866, 4194303
   call fastcc void @transparent_crc(i64 %i5867, ptr @.str.2986, i32 signext undef)
-  %i5868 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2971, i64 0, i32 2, i32 0), align 1
+  %i5868 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2971, i64 0, i32 2, i32 0), align 1
   %i5869 = shl i120 %i5868, 101
   %i5870 = ashr exact i120 %i5869, 69
   %i5871 = trunc i120 %i5870 to i64
   %i5872 = ashr exact i64 %i5871, 32
   call fastcc void @transparent_crc(i64 %i5872, ptr @.str.2987, i32 signext undef)
-  %i5873 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2971, i64 0, i32 3, i32 0), align 2, !tbaa !44
+  %i5873 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2971, i64 0, i32 3, i32 0), align 2, !tbaa !44
   %i5874 = zext i8 %i5873 to i64
   call fastcc void @transparent_crc(i64 %i5874, ptr @.str.2988, i32 signext undef)
-  %i5875 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2971, i64 0, i32 3, i32 1), align 1, !tbaa !45
+  %i5875 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2971, i64 0, i32 3, i32 1), align 1, !tbaa !45
   %i5876 = sext i8 %i5875 to i64
   call fastcc void @transparent_crc(i64 %i5876, ptr @.str.2989, i32 signext undef)
-  %i5877 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2971, i64 0, i32 3, i32 2), align 2, !tbaa !46
+  %i5877 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2971, i64 0, i32 3, i32 2), align 2, !tbaa !46
   %i5878 = sext i16 %i5877 to i64
   call fastcc void @transparent_crc(i64 %i5878, ptr @.str.2990, i32 signext undef)
-  %i5879 = load i64, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2971, i64 0, i32 3, i32 3), align 2, !tbaa !47
+  %i5879 = load i64, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2971, i64 0, i32 3, i32 3), align 2, !tbaa !47
   call fastcc void @transparent_crc(i64 %i5879, ptr @.str.2991, i32 signext undef)
-  %i5880 = load i32, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2971, i64 0, i32 3, i32 4), align 2, !tbaa !48
+  %i5880 = load i32, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2971, i64 0, i32 3, i32 4), align 2, !tbaa !48
   %i5881 = sext i32 %i5880 to i64
   call fastcc void @transparent_crc(i64 %i5881, ptr @.str.2992, i32 signext undef)
-  %i5882 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2971, i64 0, i32 4, i32 0), align 2
+  %i5882 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2971, i64 0, i32 4, i32 0), align 2
   %i5883 = ashr i128 %i5882, 99
   %i5884 = shl nsw i128 %i5883, 32
   %i5885 = trunc i128 %i5884 to i64
   %i5886 = ashr exact i64 %i5885, 32
   call fastcc void @transparent_crc(i64 %i5886, ptr @.str.2993, i32 signext undef)
-  %i5887 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2971, i64 0, i32 4, i32 0), align 2
+  %i5887 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2971, i64 0, i32 4, i32 0), align 2
   %i5888 = shl i128 %i5887, 29
   %i5889 = ashr i128 %i5888, 97
   %i5890 = shl nsw i128 %i5889, 32
   %i5891 = trunc i128 %i5890 to i64
   %i5892 = ashr exact i64 %i5891, 32
   call fastcc void @transparent_crc(i64 %i5892, ptr @.str.2994, i32 signext undef)
-  %i5893 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2971, i64 0, i32 4, i32 0), align 2
+  %i5893 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2971, i64 0, i32 4, i32 0), align 2
   %i5894 = shl i128 %i5893, 60
   %i5895 = ashr i128 %i5894, 108
   %i5896 = shl nsw i128 %i5895, 32
   %i5897 = trunc i128 %i5896 to i64
   %i5898 = ashr exact i64 %i5897, 32
   call fastcc void @transparent_crc(i64 %i5898, ptr @.str.2995, i32 signext undef)
-  %i5899 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2971, i64 0, i32 4, i32 0), align 2
+  %i5899 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2971, i64 0, i32 4, i32 0), align 2
   %i5900 = shl i128 %i5899, 80
   %i5901 = ashr i128 %i5900, 110
   %i5902 = shl nsw i128 %i5901, 32
   %i5903 = trunc i128 %i5902 to i64
   %i5904 = ashr exact i64 %i5903, 32
   call fastcc void @transparent_crc(i64 %i5904, ptr @.str.2996, i32 signext undef)
-  %i5905 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2971, i64 0, i32 4, i32 0), align 2
+  %i5905 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2971, i64 0, i32 4, i32 0), align 2
   %i5906 = lshr i128 %i5905, 28
   %i5907 = trunc i128 %i5906 to i64
   %i5908 = and i64 %i5907, 3
   call fastcc void @transparent_crc(i64 %i5908, ptr @.str.2997, i32 signext undef)
-  %i5909 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2971, i64 0, i32 4, i32 0), align 2
+  %i5909 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2971, i64 0, i32 4, i32 0), align 2
   %i5910 = shl i128 %i5909, 100
   %i5911 = ashr i128 %i5910, 107
   %i5912 = shl nsw i128 %i5911, 32
   %i5913 = trunc i128 %i5912 to i64
   %i5914 = ashr exact i64 %i5913, 32
   call fastcc void @transparent_crc(i64 %i5914, ptr @.str.2998, i32 signext undef)
-  %i5915 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2971, i64 0, i32 5, i32 0, i32 0), align 2
+  %i5915 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2971, i64 0, i32 5, i32 0, i32 0), align 2
   %i5916 = lshr i80 %i5915, 57
   %i5917 = trunc i80 %i5916 to i64
   call fastcc void @transparent_crc(i64 %i5917, ptr @.str.2999, i32 signext undef)
-  %i5918 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2971, i64 0, i32 5, i32 0, i32 0), align 2
+  %i5918 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2971, i64 0, i32 5, i32 0, i32 0), align 2
   %i5919 = shl i80 %i5918, 23
   %i5920 = ashr i80 %i5919, 64
   %i5921 = shl nsw i80 %i5920, 32
   %i5922 = trunc i80 %i5921 to i64
   %i5923 = ashr exact i64 %i5922, 32
   call fastcc void @transparent_crc(i64 %i5923, ptr @.str.3000, i32 signext undef)
-  %i5924 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2971, i64 0, i32 5, i32 0, i32 0), align 2
+  %i5924 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2971, i64 0, i32 5, i32 0, i32 0), align 2
   %i5925 = shl i80 %i5924, 39
   %i5926 = ashr i80 %i5925, 62
   %i5927 = shl nsw i80 %i5926, 32
   %i5928 = trunc i80 %i5927 to i64
   %i5929 = ashr exact i64 %i5928, 32
   call fastcc void @transparent_crc(i64 %i5929, ptr @.str.3001, i32 signext undef)
-  %i5930 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2971, i64 0, i32 5, i32 0, i32 0), align 2
+  %i5930 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2971, i64 0, i32 5, i32 0, i32 0), align 2
   %i5931 = shl i80 %i5930, 57
   %i5932 = ashr i80 %i5931, 58
   %i5933 = shl nsw i80 %i5932, 32
   %i5934 = trunc i80 %i5933 to i64
   %i5935 = ashr exact i64 %i5934, 32
   call fastcc void @transparent_crc(i64 %i5935, ptr @.str.3002, i32 signext undef)
-  %i5936 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2971, i64 0, i32 5, i32 1), align 2
+  %i5936 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2971, i64 0, i32 5, i32 1), align 2
   %i5937 = lshr i80 %i5936, 49
   %i5938 = trunc i80 %i5937 to i64
   call fastcc void @transparent_crc(i64 %i5938, ptr @.str.3003, i32 signext undef)
-  %i5939 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2971, i64 0, i32 5, i32 1), align 2
+  %i5939 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2971, i64 0, i32 5, i32 1), align 2
   %i5940 = lshr i80 %i5939, 24
   %i5941 = trunc i80 %i5940 to i64
   %i5942 = and i64 %i5941, 33554431
   call fastcc void @transparent_crc(i64 %i5942, ptr @.str.3004, i32 signext undef)
-  %i5943 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2971, i64 0, i32 5, i32 1), align 2
+  %i5943 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2971, i64 0, i32 5, i32 1), align 2
   %i5944 = shl i80 %i5943, 56
   %i5945 = ashr i80 %i5944, 68
   %i5946 = shl nsw i80 %i5945, 32
   %i5947 = trunc i80 %i5946 to i64
   %i5948 = ashr exact i64 %i5947, 32
   call fastcc void @transparent_crc(i64 %i5948, ptr @.str.3005, i32 signext undef)
-  %i5949 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2971, i64 0, i32 5, i32 1), align 2
+  %i5949 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2971, i64 0, i32 5, i32 1), align 2
   %i5950 = lshr i80 %i5949, 11
   %i5951 = trunc i80 %i5950 to i64
   %i5952 = and i64 %i5951, 1
   call fastcc void @transparent_crc(i64 %i5952, ptr @.str.3006, i32 signext undef)
-  %i5953 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2971, i64 0, i32 5, i32 1), align 2
+  %i5953 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2971, i64 0, i32 5, i32 1), align 2
   %i5954 = shl i80 %i5953, 69
   %i5955 = ashr i80 %i5954, 72
   %i5956 = shl nsw i80 %i5955, 32
   %i5957 = trunc i80 %i5956 to i64
   %i5958 = ashr exact i64 %i5957, 32
   call fastcc void @transparent_crc(i64 %i5958, ptr @.str.3007, i32 signext undef)
-  %i5959 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2971, i64 0, i32 6), align 2, !tbaa !49
+  %i5959 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2971, i64 0, i32 6), align 2, !tbaa !49
   %i5960 = sext i16 %i5959 to i64
   call fastcc void @transparent_crc(i64 %i5960, ptr @.str.3008, i32 signext undef)
-  %i5961 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2971, i64 0, i32 7), align 2, !tbaa !50
+  %i5961 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2971, i64 0, i32 7), align 2, !tbaa !50
   %i5962 = zext i16 %i5961 to i64
   call fastcc void @transparent_crc(i64 %i5962, ptr @.str.3009, i32 signext undef)
-  %i5963 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2972, i64 0, i32 0), align 2, !tbaa !23
+  %i5963 = load i16, ptr @g_2972, align 2, !tbaa !23
   %i5964 = sext i16 %i5963 to i64
   call fastcc void @transparent_crc(i64 %i5964, ptr @.str.3010, i32 signext undef)
-  %i5965 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2972, i64 0, i32 1), align 2, !tbaa !51
+  %i5965 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2972, i64 0, i32 1), align 2, !tbaa !51
   %i5966 = sext i8 %i5965 to i64
   call fastcc void @transparent_crc(i64 %i5966, ptr @.str.3011, i32 signext undef)
-  %i5967 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2972, i64 0, i32 2, i32 0), align 1
+  %i5967 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2972, i64 0, i32 2, i32 0), align 1
   %i5968 = lshr i120 %i5967, 107
   %i5969 = trunc i120 %i5968 to i64
   call fastcc void @transparent_crc(i64 %i5969, ptr @.str.3012, i32 signext undef)
-  %i5970 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2972, i64 0, i32 2, i32 0), align 1
+  %i5970 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2972, i64 0, i32 2, i32 0), align 1
   %i5971 = lshr i120 %i5970, 78
   %i5972 = trunc i120 %i5971 to i64
   %i5973 = and i64 %i5972, 536870911
   call fastcc void @transparent_crc(i64 %i5973, ptr @.str.3013, i32 signext undef)
-  %i5974 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2972, i64 0, i32 2, i32 0), align 1
+  %i5974 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2972, i64 0, i32 2, i32 0), align 1
   %i5975 = shl i120 %i5974, 42
   %i5976 = ashr i120 %i5975, 104
   %i5977 = shl nsw i120 %i5976, 32
   %i5978 = trunc i120 %i5977 to i64
   %i5979 = ashr exact i64 %i5978, 32
   call fastcc void @transparent_crc(i64 %i5979, ptr @.str.3014, i32 signext undef)
-  %i5980 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2972, i64 0, i32 2, i32 0), align 1
+  %i5980 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2972, i64 0, i32 2, i32 0), align 1
   %i5981 = shl i120 %i5980, 58
   %i5982 = ashr i120 %i5981, 105
   %i5983 = shl nsw i120 %i5982, 32
   %i5984 = trunc i120 %i5983 to i64
   %i5985 = ashr exact i64 %i5984, 32
   call fastcc void @transparent_crc(i64 %i5985, ptr @.str.3015, i32 signext undef)
-  %i5986 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2972, i64 0, i32 2, i32 0), align 1
+  %i5986 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2972, i64 0, i32 2, i32 0), align 1
   %i5987 = lshr i120 %i5986, 41
   %i5988 = trunc i120 %i5987 to i64
   %i5989 = and i64 %i5988, 63
   call fastcc void @transparent_crc(i64 %i5989, ptr @.str.3016, i32 signext undef)
-  %i5990 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2972, i64 0, i32 2, i32 0), align 1
+  %i5990 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2972, i64 0, i32 2, i32 0), align 1
   %i5991 = lshr i120 %i5990, 19
   %i5992 = trunc i120 %i5991 to i64
   %i5993 = and i64 %i5992, 4194303
   call fastcc void @transparent_crc(i64 %i5993, ptr @.str.3017, i32 signext undef)
-  %i5994 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2972, i64 0, i32 2, i32 0), align 1
+  %i5994 = load volatile i120, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2972, i64 0, i32 2, i32 0), align 1
   %i5995 = shl i120 %i5994, 101
   %i5996 = ashr exact i120 %i5995, 69
   %i5997 = trunc i120 %i5996 to i64
   %i5998 = ashr exact i64 %i5997, 32
   call fastcc void @transparent_crc(i64 %i5998, ptr @.str.3018, i32 signext undef)
-  %i5999 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2972, i64 0, i32 3, i32 0), align 2, !tbaa !44
+  %i5999 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2972, i64 0, i32 3, i32 0), align 2, !tbaa !44
   %i6000 = zext i8 %i5999 to i64
   call fastcc void @transparent_crc(i64 %i6000, ptr @.str.3019, i32 signext undef)
-  %i6001 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2972, i64 0, i32 3, i32 1), align 1, !tbaa !45
+  %i6001 = load i8, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2972, i64 0, i32 3, i32 1), align 1, !tbaa !45
   %i6002 = sext i8 %i6001 to i64
   call fastcc void @transparent_crc(i64 %i6002, ptr @.str.3020, i32 signext undef)
-  %i6003 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2972, i64 0, i32 3, i32 2), align 2, !tbaa !46
+  %i6003 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2972, i64 0, i32 3, i32 2), align 2, !tbaa !46
   %i6004 = sext i16 %i6003 to i64
   call fastcc void @transparent_crc(i64 %i6004, ptr @.str.3021, i32 signext undef)
-  %i6005 = load i64, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2972, i64 0, i32 3, i32 3), align 2, !tbaa !47
+  %i6005 = load i64, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2972, i64 0, i32 3, i32 3), align 2, !tbaa !47
   call fastcc void @transparent_crc(i64 %i6005, ptr @.str.3022, i32 signext undef)
-  %i6006 = load i32, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2972, i64 0, i32 3, i32 4), align 2, !tbaa !48
+  %i6006 = load i32, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2972, i64 0, i32 3, i32 4), align 2, !tbaa !48
   %i6007 = sext i32 %i6006 to i64
   call fastcc void @transparent_crc(i64 %i6007, ptr @.str.3023, i32 signext undef)
-  %i6008 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2972, i64 0, i32 4, i32 0), align 2
+  %i6008 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2972, i64 0, i32 4, i32 0), align 2
   %i6009 = ashr i128 %i6008, 99
   %i6010 = shl nsw i128 %i6009, 32
   %i6011 = trunc i128 %i6010 to i64
   %i6012 = ashr exact i64 %i6011, 32
   call fastcc void @transparent_crc(i64 %i6012, ptr @.str.3024, i32 signext undef)
-  %i6013 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2972, i64 0, i32 4, i32 0), align 2
+  %i6013 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2972, i64 0, i32 4, i32 0), align 2
   %i6014 = shl i128 %i6013, 29
   %i6015 = ashr i128 %i6014, 97
   %i6016 = shl nsw i128 %i6015, 32
   %i6017 = trunc i128 %i6016 to i64
   %i6018 = ashr exact i64 %i6017, 32
   call fastcc void @transparent_crc(i64 %i6018, ptr @.str.3025, i32 signext undef)
-  %i6019 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2972, i64 0, i32 4, i32 0), align 2
+  %i6019 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2972, i64 0, i32 4, i32 0), align 2
   call fastcc void @transparent_crc(i64 0, ptr @.str.3037, i32 signext undef)
-  %i6020 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2972, i64 0, i32 5, i32 1), align 2
+  %i6020 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2972, i64 0, i32 5, i32 1), align 2
   call fastcc void @transparent_crc(i64 undef, ptr @.str.3086, i32 signext undef)
-  %i6021 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2974, i64 0, i32 4, i32 0), align 2
+  %i6021 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2974, i64 0, i32 4, i32 0), align 2
   %i6022 = shl i128 %i6021, 29
   %i6023 = ashr i128 %i6022, 97
   %i6024 = shl nsw i128 %i6023, 32
   %i6025 = trunc i128 %i6024 to i64
   %i6026 = ashr exact i64 %i6025, 32
   call fastcc void @transparent_crc(i64 %i6026, ptr @.str.3087, i32 signext undef)
-  %i6027 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2974, i64 0, i32 4, i32 0), align 2
+  %i6027 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2974, i64 0, i32 4, i32 0), align 2
   %i6028 = shl i128 %i6027, 60
   %i6029 = ashr i128 %i6028, 108
   %i6030 = shl nsw i128 %i6029, 32
   %i6031 = trunc i128 %i6030 to i64
   %i6032 = ashr exact i64 %i6031, 32
   call fastcc void @transparent_crc(i64 %i6032, ptr @.str.3088, i32 signext undef)
-  %i6033 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2974, i64 0, i32 4, i32 0), align 2
+  %i6033 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2974, i64 0, i32 4, i32 0), align 2
   %i6034 = shl i128 %i6033, 80
   %i6035 = ashr i128 %i6034, 110
   %i6036 = shl nsw i128 %i6035, 32
   %i6037 = trunc i128 %i6036 to i64
   %i6038 = ashr exact i64 %i6037, 32
   call fastcc void @transparent_crc(i64 %i6038, ptr @.str.3089, i32 signext undef)
-  %i6039 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2974, i64 0, i32 4, i32 0), align 2
+  %i6039 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2974, i64 0, i32 4, i32 0), align 2
   %i6040 = lshr i128 %i6039, 28
   %i6041 = trunc i128 %i6040 to i64
   %i6042 = and i64 %i6041, 3
   call fastcc void @transparent_crc(i64 %i6042, ptr @.str.3090, i32 signext undef)
-  %i6043 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2974, i64 0, i32 4, i32 0), align 2
+  %i6043 = load volatile i128, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2974, i64 0, i32 4, i32 0), align 2
   %i6044 = shl i128 %i6043, 100
   %i6045 = ashr i128 %i6044, 107
   %i6046 = shl nsw i128 %i6045, 32
   %i6047 = trunc i128 %i6046 to i64
   %i6048 = ashr exact i64 %i6047, 32
   call fastcc void @transparent_crc(i64 %i6048, ptr @.str.3091, i32 signext undef)
-  %i6049 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2974, i64 0, i32 5, i32 0, i32 0), align 2
+  %i6049 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2974, i64 0, i32 5, i32 0, i32 0), align 2
   %i6050 = lshr i80 %i6049, 57
   %i6051 = trunc i80 %i6050 to i64
   call fastcc void @transparent_crc(i64 %i6051, ptr @.str.3092, i32 signext undef)
-  %i6052 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2974, i64 0, i32 5, i32 0, i32 0), align 2
+  %i6052 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2974, i64 0, i32 5, i32 0, i32 0), align 2
   %i6053 = shl i80 %i6052, 23
   %i6054 = ashr i80 %i6053, 64
   %i6055 = shl nsw i80 %i6054, 32
   %i6056 = trunc i80 %i6055 to i64
   %i6057 = ashr exact i64 %i6056, 32
   call fastcc void @transparent_crc(i64 %i6057, ptr @.str.3093, i32 signext undef)
-  %i6058 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2974, i64 0, i32 5, i32 0, i32 0), align 2
+  %i6058 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2974, i64 0, i32 5, i32 0, i32 0), align 2
   %i6059 = shl i80 %i6058, 39
   %i6060 = ashr i80 %i6059, 62
   %i6061 = shl nsw i80 %i6060, 32
   %i6062 = trunc i80 %i6061 to i64
   %i6063 = ashr exact i64 %i6062, 32
   call fastcc void @transparent_crc(i64 %i6063, ptr @.str.3094, i32 signext undef)
-  %i6064 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2974, i64 0, i32 5, i32 0, i32 0), align 2
+  %i6064 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2974, i64 0, i32 5, i32 0, i32 0), align 2
   %i6065 = shl i80 %i6064, 57
   %i6066 = ashr i80 %i6065, 58
   %i6067 = shl nsw i80 %i6066, 32
   %i6068 = trunc i80 %i6067 to i64
   %i6069 = ashr exact i64 %i6068, 32
   call fastcc void @transparent_crc(i64 %i6069, ptr @.str.3095, i32 signext undef)
-  %i6070 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2974, i64 0, i32 5, i32 1), align 2
+  %i6070 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2974, i64 0, i32 5, i32 1), align 2
   %i6071 = lshr i80 %i6070, 49
   %i6072 = trunc i80 %i6071 to i64
   call fastcc void @transparent_crc(i64 %i6072, ptr @.str.3096, i32 signext undef)
-  %i6073 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2974, i64 0, i32 5, i32 1), align 2
+  %i6073 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2974, i64 0, i32 5, i32 1), align 2
   %i6074 = lshr i80 %i6073, 24
   %i6075 = trunc i80 %i6074 to i64
   %i6076 = and i64 %i6075, 33554431
   call fastcc void @transparent_crc(i64 %i6076, ptr @.str.3097, i32 signext undef)
-  %i6077 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2974, i64 0, i32 5, i32 1), align 2
+  %i6077 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2974, i64 0, i32 5, i32 1), align 2
   %i6078 = shl i80 %i6077, 56
   %i6079 = ashr i80 %i6078, 68
   %i6080 = shl nsw i80 %i6079, 32
   %i6081 = trunc i80 %i6080 to i64
   %i6082 = ashr exact i64 %i6081, 32
   call fastcc void @transparent_crc(i64 %i6082, ptr @.str.3098, i32 signext undef)
-  %i6083 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2974, i64 0, i32 5, i32 1), align 2
+  %i6083 = load i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2974, i64 0, i32 5, i32 1), align 2
   %i6084 = lshr i80 %i6083, 11
   %i6085 = trunc i80 %i6084 to i64
   %i6086 = and i64 %i6085, 1
   call fastcc void @transparent_crc(i64 %i6086, ptr @.str.3099, i32 signext undef)
-  %i6087 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2974, i64 0, i32 5, i32 1), align 2
+  %i6087 = load volatile i80, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2974, i64 0, i32 5, i32 1), align 2
   %i6088 = shl i80 %i6087, 69
   %i6089 = ashr i80 %i6088, 72
   %i6090 = shl nsw i80 %i6089, 32
   %i6091 = trunc i80 %i6090 to i64
   %i6092 = ashr exact i64 %i6091, 32
   call fastcc void @transparent_crc(i64 %i6092, ptr @.str.3100, i32 signext undef)
-  %i6093 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2974, i64 0, i32 6), align 2, !tbaa !49
+  %i6093 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2974, i64 0, i32 6), align 2, !tbaa !49
   %i6094 = sext i16 %i6093 to i64
   call fastcc void @transparent_crc(i64 %i6094, ptr @.str.3101, i32 signext undef)
-  %i6095 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>* @g_2974, i64 0, i32 7), align 2, !tbaa !50
+  %i6095 = load i16, ptr getelementptr inbounds (<{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, ptr @g_2974, i64 0, i32 7), align 2, !tbaa !50
   %i6096 = zext i16 %i6095 to i64
   call fastcc void @transparent_crc(i64 %i6096, ptr @.str.3102, i32 signext undef)
   %i6097 = load i16, ptr undef, align 2, !tbaa !23
   %i6098 = sext i16 %i6097 to i64
   call fastcc void @transparent_crc(i64 %i6098, ptr @.str.3103, i32 signext undef)
-  %i6099 = getelementptr inbounds [4 x %5], ptr bitcast (<{ <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }>, <{ i16, i8, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, %0, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i16, i16 }> }>* @g_2975 to ptr), i64 0, i64 0, i32 1
+  %i6099 = getelementptr inbounds [4 x %5], ptr @g_2975, i64 0, i64 0, i32 1
   %i6100 = load i8, ptr %i6099, align 2, !tbaa !51
   %i6101 = sext i8 %i6100 to i64
   call fastcc void @transparent_crc(i64 %i6101, ptr @.str.3104, i32 signext undef)
@@ -11248,7 +11248,7 @@ bb25:                                             ; preds = %bb15
   %i6180 = ashr exact i64 %i6179, 32
   call fastcc void @transparent_crc(i64 %i6180, ptr @.str.3141, i32 signext undef)
   call fastcc void @transparent_crc(i64 440374213169866530, ptr @.str.3142, i32 signext undef)
-  %i6181 = load i32, ptr getelementptr inbounds ({ i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, ptr @g_3090, i64 0, i32 0), align 4, !tbaa !33
+  %i6181 = load i32, ptr @g_3090, align 4, !tbaa !33
   %i6182 = zext i32 %i6181 to i64
   call fastcc void @transparent_crc(i64 %i6182, ptr @.str.3143, i32 signext undef)
   %i6183 = load i8, ptr getelementptr inbounds ({ i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, ptr @g_3090, i64 0, i32 1), align 4, !tbaa !6
@@ -11278,45 +11278,45 @@ bb25:                                             ; preds = %bb15
   %i6202 = trunc i80 %i6201 to i64
   %i6203 = ashr exact i64 %i6202, 32
   call fastcc void @transparent_crc(i64 %i6203, ptr @.str.3155, i32 signext undef)
-  %i6204 = load i16, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, <{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>* @g_3108, i64 0, i32 0), align 2, !tbaa !56
+  %i6204 = load i16, ptr @g_3108, align 2, !tbaa !56
   %i6205 = sext i16 %i6204 to i64
   call fastcc void @transparent_crc(i64 %i6205, ptr @.str.3156, i32 signext undef)
-  %i6206 = load i32, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, <{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>* @g_3108, i64 0, i32 1), align 2, !tbaa !57
+  %i6206 = load i32, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, ptr @g_3108, i64 0, i32 1), align 2, !tbaa !57
   %i6207 = sext i32 %i6206 to i64
   call fastcc void @transparent_crc(i64 %i6207, ptr @.str.3157, i32 signext undef)
-  %i6208 = load volatile i8, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, <{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>* @g_3108, i64 0, i32 2), align 2, !tbaa !58
+  %i6208 = load volatile i8, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, ptr @g_3108, i64 0, i32 2), align 2, !tbaa !58
   call fastcc void @transparent_crc(i64 undef, ptr @.str.3158, i32 signext undef)
-  %i6209 = load i16, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, <{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>* @g_3108, i64 0, i32 3), align 1, !tbaa !59
+  %i6209 = load i16, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, ptr @g_3108, i64 0, i32 3), align 1, !tbaa !59
   %i6210 = sext i16 %i6209 to i64
   call fastcc void @transparent_crc(i64 %i6210, ptr @.str.3159, i32 signext undef)
-  %i6211 = load i80, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, <{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>* @g_3108, i64 0, i32 4, i32 0), align 1
+  %i6211 = load i80, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, ptr @g_3108, i64 0, i32 4, i32 0), align 1
   %i6212 = lshr i80 %i6211, 57
   %i6213 = trunc i80 %i6212 to i64
   call fastcc void @transparent_crc(i64 %i6213, ptr @.str.3160, i32 signext undef)
-  %i6214 = load i80, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, <{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>* @g_3108, i64 0, i32 4, i32 0), align 1
+  %i6214 = load i80, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, ptr @g_3108, i64 0, i32 4, i32 0), align 1
   %i6215 = shl i80 %i6214, 23
   %i6216 = ashr i80 %i6215, 64
   %i6217 = shl nsw i80 %i6216, 32
   %i6218 = trunc i80 %i6217 to i64
   %i6219 = ashr exact i64 %i6218, 32
   call fastcc void @transparent_crc(i64 %i6219, ptr @.str.3161, i32 signext undef)
-  %i6220 = load i80, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, <{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>* @g_3108, i64 0, i32 4, i32 0), align 1
+  %i6220 = load i80, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, ptr @g_3108, i64 0, i32 4, i32 0), align 1
   %i6221 = shl i80 %i6220, 39
   %i6222 = ashr i80 %i6221, 62
   %i6223 = shl nsw i80 %i6222, 32
   %i6224 = trunc i80 %i6223 to i64
   %i6225 = ashr exact i64 %i6224, 32
   call fastcc void @transparent_crc(i64 %i6225, ptr @.str.3162, i32 signext undef)
-  %i6226 = load i80, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, <{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>* @g_3108, i64 0, i32 4, i32 0), align 1
+  %i6226 = load i80, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, ptr @g_3108, i64 0, i32 4, i32 0), align 1
   %i6227 = shl i80 %i6226, 57
   %i6228 = ashr i80 %i6227, 58
   %i6229 = shl nsw i80 %i6228, 32
   %i6230 = trunc i80 %i6229 to i64
   %i6231 = ashr exact i64 %i6230, 32
   call fastcc void @transparent_crc(i64 %i6231, ptr @.str.3163, i32 signext undef)
-  %i6232 = load volatile i64, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, <{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>* @g_3108, i64 0, i32 5), align 1, !tbaa !53
+  %i6232 = load volatile i64, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, ptr @g_3108, i64 0, i32 5), align 1, !tbaa !53
   call fastcc void @transparent_crc(i64 %i6232, ptr @.str.3164, i32 signext undef)
-  %i6233 = load volatile i64, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, <{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>* @g_3108, i64 0, i32 6), align 1, !tbaa !55
+  %i6233 = load volatile i64, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, ptr @g_3108, i64 0, i32 6), align 1, !tbaa !55
   call fastcc void @transparent_crc(i64 %i6233, ptr @.str.3165, i32 signext undef)
   %i6234 = load volatile i80, ptr undef, align 2
   %i6235 = lshr i80 %i6234, 57
@@ -11434,7 +11434,7 @@ bb25:                                             ; preds = %bb15
   %i6328 = trunc i80 %i6327 to i64
   %i6329 = ashr exact i64 %i6328, 32
   call fastcc void @transparent_crc(i64 %i6329, ptr @.str.3169, i32 signext undef)
-  %i6330 = getelementptr inbounds [10 x [7 x [3 x %4]]], ptr bitcast (<{ <{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, <{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, 
i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, <{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, <{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, 
i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, <{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, <{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, 
i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, <{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, <{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, 
i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, <{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, <{ <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, 
i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }> }>* @g_3202 to ptr), i64 0, i64 0, i64 0, i64 2, i32 1
+  %i6330 = getelementptr inbounds [10 x [7 x [3 x %4]]], ptr @g_3202, i64 0, i64 0, i64 0, i64 2, i32 1
   %i6332 = load i80, ptr %i6330, align 2
   %i6333 = lshr i80 %i6332, 49
   %i6334 = trunc i80 %i6333 to i64
@@ -11545,7 +11545,7 @@ bb25:                                             ; preds = %bb15
   %i6417 = trunc i80 %i6416 to i64
   %i6418 = and i64 %i6417, 262143
   call fastcc void @transparent_crc(i64 %i6418, ptr @.str.3191, i32 signext 0)
-  %i6419 = getelementptr inbounds [4 x [5 x [7 x %7]]], ptr bitcast (<{ <{ <{ { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, <{ <{ { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, <{ <{ { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { i8, i8, 
i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }>, <{ <{ { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }>, <{ { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }> }> }>* @g_3370 to ptr), i64 0, i64 0, i64 0, i64 4
+  %i6419 = getelementptr inbounds [4 x [5 x [7 x %7]]], ptr @g_3370, i64 0, i64 0, i64 0, i64 4
   %i6421 = load volatile i80, ptr %i6419, align 2
   %i6422 = ashr i80 %i6421, 73
   %i6423 = shl nsw i80 %i6422, 32
@@ -11630,7 +11630,7 @@ bb25:                                             ; preds = %bb15
   %i6488 = trunc i80 %i6487 to i64
   %i6489 = and i64 %i6488, 262143
   call fastcc void @transparent_crc(i64 %i6489, ptr @.str.3191, i32 signext 0)
-  %i6490 = load i32, ptr getelementptr inbounds ({ i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, ptr @g_3431, i64 0, i32 0), align 4, !tbaa !33
+  %i6490 = load i32, ptr @g_3431, align 4, !tbaa !33
   %i6491 = zext i32 %i6490 to i64
   call fastcc void @transparent_crc(i64 %i6491, ptr @.str.3192, i32 signext undef)
   %i6492 = load i8, ptr getelementptr inbounds ({ i32, i8, i16, i32, { { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } }, ptr @g_3431, i64 0, i32 1), align 4, !tbaa !6
@@ -11703,29 +11703,29 @@ bb25:                                             ; preds = %bb15
   %i6545 = load i16, ptr undef, align 2, !tbaa !20
   %i6546 = zext i16 %i6545 to i64
   call fastcc void @transparent_crc(i64 %i6546, ptr @.str.3205, i32 signext 0)
-  %i6547 = load i16, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, <{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>* @g_3567, i64 0, i32 0), align 2, !tbaa !56
+  %i6547 = load i16, ptr @g_3567, align 2, !tbaa !56
   %i6548 = sext i16 %i6547 to i64
   call fastcc void @transparent_crc(i64 %i6548, ptr @.str.3206, i32 signext undef)
-  %i6549 = load i32, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, <{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>* @g_3567, i64 0, i32 1), align 2, !tbaa !57
+  %i6549 = load i32, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, ptr @g_3567, i64 0, i32 1), align 2, !tbaa !57
   %i6550 = sext i32 %i6549 to i64
   call fastcc void @transparent_crc(i64 %i6550, ptr @.str.3207, i32 signext undef)
-  %i6551 = load volatile i8, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, <{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>* @g_3567, i64 0, i32 2), align 2, !tbaa !58
+  %i6551 = load volatile i8, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, ptr @g_3567, i64 0, i32 2), align 2, !tbaa !58
   call fastcc void @transparent_crc(i64 undef, ptr @.str.3208, i32 signext undef)
-  %i6552 = load i16, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, <{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>* @g_3567, i64 0, i32 3), align 1, !tbaa !59
+  %i6552 = load i16, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, ptr @g_3567, i64 0, i32 3), align 1, !tbaa !59
   %i6553 = sext i16 %i6552 to i64
   call fastcc void @transparent_crc(i64 %i6553, ptr @.str.3209, i32 signext undef)
-  %i6554 = load i80, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, <{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>* @g_3567, i64 0, i32 4, i32 0), align 1
+  %i6554 = load i80, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, ptr @g_3567, i64 0, i32 4, i32 0), align 1
   %i6555 = lshr i80 %i6554, 57
   %i6556 = trunc i80 %i6555 to i64
   call fastcc void @transparent_crc(i64 %i6556, ptr @.str.3210, i32 signext undef)
-  %i6557 = load i80, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, <{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>* @g_3567, i64 0, i32 4, i32 0), align 1
+  %i6557 = load i80, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, ptr @g_3567, i64 0, i32 4, i32 0), align 1
   %i6558 = shl i80 %i6557, 23
   %i6559 = ashr i80 %i6558, 64
   %i6560 = shl nsw i80 %i6559, 32
   %i6561 = trunc i80 %i6560 to i64
   %i6562 = ashr exact i64 %i6561, 32
   call fastcc void @transparent_crc(i64 %i6562, ptr @.str.3211, i32 signext undef)
-  %i6563 = load i80, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, <{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>* @g_3567, i64 0, i32 4, i32 0), align 1
+  %i6563 = load i80, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, ptr @g_3567, i64 0, i32 4, i32 0), align 1
   %i6564 = shl i80 %i6563, 39
   %i6565 = ashr i80 %i6564, 62
   %i6566 = shl nsw i80 %i6565, 32
@@ -11733,49 +11733,49 @@ bb25:                                             ; preds = %bb15
   %i6568 = ashr exact i64 %i6567, 32
   call fastcc void @transparent_crc(i64 %i6568, ptr @.str.3212, i32 signext undef)
   call fastcc void @transparent_crc(i64 0, ptr @.str.3213, i32 signext undef)
-  %i6569 = load volatile i64, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, <{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>* @g_3567, i64 0, i32 5), align 1, !tbaa !53
+  %i6569 = load volatile i64, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, ptr @g_3567, i64 0, i32 5), align 1, !tbaa !53
   call fastcc void @transparent_crc(i64 %i6569, ptr @.str.3214, i32 signext undef)
-  %i6570 = load volatile i64, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, <{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>* @g_3567, i64 0, i32 6), align 1, !tbaa !55
+  %i6570 = load volatile i64, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, ptr @g_3567, i64 0, i32 6), align 1, !tbaa !55
   call fastcc void @transparent_crc(i64 %i6570, ptr @.str.3215, i32 signext undef)
-  %i6571 = load i16, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, <{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>* @g_3568, i64 0, i32 0), align 2, !tbaa !56
+  %i6571 = load i16, ptr @g_3568, align 2, !tbaa !56
   %i6572 = sext i16 %i6571 to i64
   call fastcc void @transparent_crc(i64 %i6572, ptr @.str.3216, i32 signext undef)
-  %i6573 = load i32, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, <{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>* @g_3568, i64 0, i32 1), align 2, !tbaa !57
+  %i6573 = load i32, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, ptr @g_3568, i64 0, i32 1), align 2, !tbaa !57
   %i6574 = sext i32 %i6573 to i64
   call fastcc void @transparent_crc(i64 %i6574, ptr @.str.3217, i32 signext undef)
-  %i6575 = load volatile i8, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, <{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>* @g_3568, i64 0, i32 2), align 2, !tbaa !58
+  %i6575 = load volatile i8, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, ptr @g_3568, i64 0, i32 2), align 2, !tbaa !58
   call fastcc void @transparent_crc(i64 undef, ptr @.str.3218, i32 signext undef)
-  %i6576 = load i16, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, <{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>* @g_3568, i64 0, i32 3), align 1, !tbaa !59
+  %i6576 = load i16, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, ptr @g_3568, i64 0, i32 3), align 1, !tbaa !59
   %i6577 = sext i16 %i6576 to i64
   call fastcc void @transparent_crc(i64 %i6577, ptr @.str.3219, i32 signext undef)
-  %i6578 = load i80, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, <{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>* @g_3568, i64 0, i32 4, i32 0), align 1
+  %i6578 = load i80, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, ptr @g_3568, i64 0, i32 4, i32 0), align 1
   %i6579 = lshr i80 %i6578, 57
   %i6580 = trunc i80 %i6579 to i64
   call fastcc void @transparent_crc(i64 %i6580, ptr @.str.3220, i32 signext undef)
-  %i6581 = load i80, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, <{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>* @g_3568, i64 0, i32 4, i32 0), align 1
+  %i6581 = load i80, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, ptr @g_3568, i64 0, i32 4, i32 0), align 1
   %i6582 = shl i80 %i6581, 23
   %i6583 = ashr i80 %i6582, 64
   %i6584 = shl nsw i80 %i6583, 32
   %i6585 = trunc i80 %i6584 to i64
   %i6586 = ashr exact i64 %i6585, 32
   call fastcc void @transparent_crc(i64 %i6586, ptr @.str.3221, i32 signext undef)
-  %i6587 = load i80, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, <{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>* @g_3568, i64 0, i32 4, i32 0), align 1
+  %i6587 = load i80, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, ptr @g_3568, i64 0, i32 4, i32 0), align 1
   %i6588 = shl i80 %i6587, 39
   %i6589 = ashr i80 %i6588, 62
   %i6590 = shl nsw i80 %i6589, 32
   %i6591 = trunc i80 %i6590 to i64
   %i6592 = ashr exact i64 %i6591, 32
   call fastcc void @transparent_crc(i64 %i6592, ptr @.str.3222, i32 signext undef)
-  %i6593 = load i80, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, <{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>* @g_3568, i64 0, i32 4, i32 0), align 1
+  %i6593 = load i80, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, ptr @g_3568, i64 0, i32 4, i32 0), align 1
   %i6594 = shl i80 %i6593, 57
   %i6595 = ashr i80 %i6594, 58
   %i6596 = shl nsw i80 %i6595, 32
   %i6597 = trunc i80 %i6596 to i64
   %i6598 = ashr exact i64 %i6597, 32
   call fastcc void @transparent_crc(i64 %i6598, ptr @.str.3223, i32 signext undef)
-  %i6599 = load volatile i64, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, <{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>* @g_3568, i64 0, i32 5), align 1, !tbaa !53
+  %i6599 = load volatile i64, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, ptr @g_3568, i64 0, i32 5), align 1, !tbaa !53
   call fastcc void @transparent_crc(i64 %i6599, ptr @.str.3224, i32 signext undef)
-  %i6600 = load volatile i64, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, <{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>* @g_3568, i64 0, i32 6), align 1, !tbaa !55
+  %i6600 = load volatile i64, ptr getelementptr inbounds (<{ i16, i32, i8, i16, { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }, i64, i64 }>, ptr @g_3568, i64 0, i32 6), align 1, !tbaa !55
   call fastcc void @transparent_crc(i64 %i6600, ptr @.str.3225, i32 signext undef)
   call fastcc void @transparent_crc(i64 2184720098, ptr @.str.3226, i32 signext 0)
   call fastcc void @transparent_crc(i64 2184720098, ptr @.str.3226, i32 signext 0)

diff  --git a/llvm/test/CodeGen/SystemZ/RAbasic-invalid-LR-update.mir b/llvm/test/CodeGen/SystemZ/RAbasic-invalid-LR-update.mir
index 79fa84f8a2a49..3b308ce3d0d24 100644
--- a/llvm/test/CodeGen/SystemZ/RAbasic-invalid-LR-update.mir
+++ b/llvm/test/CodeGen/SystemZ/RAbasic-invalid-LR-update.mir
@@ -194,7 +194,7 @@ body:             |
     %36 = VLVGH %36, %20.subreg_l32, $noreg, 0
     %36 = VLVGH %36, %34.subreg_l32, $noreg, 1
     dead %36 = VLVGH %36, %40.subreg_l32, $noreg, 2
-    %4 = LG undef %42, 0, $noreg :: (load (s64) from `i64* undef`)
+    %4 = LG undef %42, 0, $noreg :: (load (s64) from `ptr undef`)
     undef %57.subreg_h64 = LLILL 0
     undef %66.subreg_h64 = LLILL 0
     undef %79.subreg_h64 = LLILL 0

diff  --git a/llvm/test/CodeGen/SystemZ/clear-liverange-spillreg.mir b/llvm/test/CodeGen/SystemZ/clear-liverange-spillreg.mir
index 13110579bc0f3..7ff7d9b8b7094 100644
--- a/llvm/test/CodeGen/SystemZ/clear-liverange-spillreg.mir
+++ b/llvm/test/CodeGen/SystemZ/clear-liverange-spillreg.mir
@@ -12,9 +12,9 @@
   @rec_mbY8x8 = external local_unnamed_addr global [16 x [16 x i16]], align 2
   @bi_pred_me = external local_unnamed_addr global i32, align 4
 
-  declare signext i32 @Get_Direct_Cost8x8(i32 signext, i32*) local_unnamed_addr #0
-  declare void @store_coding_state(i32*) local_unnamed_addr #0
-  declare void @reset_coding_state(i32*) local_unnamed_addr #0
+  declare signext i32 @Get_Direct_Cost8x8(i32 signext, ptr) local_unnamed_addr #0
+  declare void @store_coding_state(ptr) local_unnamed_addr #0
+  declare void @reset_coding_state(ptr) local_unnamed_addr #0
   declare void @SetRefAndMotionVectors(i32 signext, i32 signext, i32 signext, i32 signext, i32 signext) local_unnamed_addr #2
   declare signext i32 @Get_Direct_CostMB(double) local_unnamed_addr #0
   declare void @SetModesAndRefframeForBlocks(i32 signext) local_unnamed_addr #1

diff  --git a/llvm/test/CodeGen/SystemZ/cond-move-04.mir b/llvm/test/CodeGen/SystemZ/cond-move-04.mir
index d284128bb4a31..97aa00f582921 100644
--- a/llvm/test/CodeGen/SystemZ/cond-move-04.mir
+++ b/llvm/test/CodeGen/SystemZ/cond-move-04.mir
@@ -6,9 +6,9 @@
 
 --- |
   
-  declare i8* @foo(i8*, i32 signext, i32 signext) local_unnamed_addr
+  declare ptr @foo(ptr, i32 signext, i32 signext) local_unnamed_addr
   
-  define i8* @fun(i8* returned) {
+  define ptr @fun(ptr returned) {
     br label %2
   
   ; <label>:2:                                      ; preds = %6, %1
@@ -26,7 +26,7 @@
   
   ; <label>:6:                                      ; preds = %5, %4, %2
     %7 = phi i32 [ 4, %2 ], [ undef, %4 ], [ 10, %5 ]
-    %8 = call i8* @foo(i8* undef, i32 signext undef, i32 signext %7)
+    %8 = call ptr @foo(ptr undef, i32 signext undef, i32 signext %7)
     br label %2
   }
 

diff  --git a/llvm/test/CodeGen/SystemZ/cond-move-05.mir b/llvm/test/CodeGen/SystemZ/cond-move-05.mir
index 7cc69bc8f59da..b2b7609bf102f 100644
--- a/llvm/test/CodeGen/SystemZ/cond-move-05.mir
+++ b/llvm/test/CodeGen/SystemZ/cond-move-05.mir
@@ -16,7 +16,7 @@
   ; Function Attrs: nounwind
   define void @main() local_unnamed_addr #0 {
   entry:
-    %0 = load i32, i32* @g_74, align 4
+    %0 = load i32, ptr @g_74, align 4
     %conv478.i.i = sext i32 %0 to i64
     %cond.i15.i.i = lshr i32 1, 0
     %conv2.i16.i.i = zext i32 %cond.i15.i.i to i64

diff  --git a/llvm/test/CodeGen/SystemZ/cond-move-08.mir b/llvm/test/CodeGen/SystemZ/cond-move-08.mir
index 067b90f938e44..93aa5626b8e89 100644
--- a/llvm/test/CodeGen/SystemZ/cond-move-08.mir
+++ b/llvm/test/CodeGen/SystemZ/cond-move-08.mir
@@ -22,17 +22,17 @@
     br label %bb7
 
   bb7:                                              ; preds = %bb7, %bb6
-    %lsr.iv1 = phi [512 x i32]* [ %0, %bb7 ], [ undef, %bb6 ]
+    %lsr.iv1 = phi ptr [ %0, %bb7 ], [ undef, %bb6 ]
     %tmp8 = phi i32 [ %tmp27, %bb7 ], [ -1000000, %bb6 ]
     %tmp9 = phi i64 [ %tmp28, %bb7 ], [ 0, %bb6 ]
     %lsr3 = trunc i64 %tmp9 to i32
-    %lsr.iv12 = bitcast [512 x i32]* %lsr.iv1 to i32*
-    %tmp11 = load i32, i32* %lsr.iv12
+    %lsr.iv12 = bitcast ptr %lsr.iv1 to ptr
+    %tmp11 = load i32, ptr %lsr.iv12
     %tmp12 = icmp sgt i32 %tmp11, undef
     %tmp13 = trunc i64 %tmp9 to i32
     %tmp14 = select i1 %tmp12, i32 %lsr3, i32 0
     %tmp15 = select i1 %tmp12, i32 %tmp13, i32 %tmp8
-    %tmp16 = load i32, i32* undef
+    %tmp16 = load i32, ptr undef
     %tmp17 = select i1 false, i32 undef, i32 %tmp14
     %tmp18 = select i1 false, i32 undef, i32 %tmp15
     %tmp19 = select i1 false, i32 %tmp16, i32 undef
@@ -46,8 +46,8 @@
     %tmp27 = select i1 %tmp24, i32 %tmp25, i32 %tmp21
     %tmp28 = add nuw nsw i64 %tmp9, 4
     %tmp29 = icmp eq i64 undef, 0
-    %scevgep = getelementptr [512 x i32], [512 x i32]* %lsr.iv1, i64 0, i64 4
-    %0 = bitcast i32* %scevgep to [512 x i32]*
+    %scevgep = getelementptr [512 x i32], ptr %lsr.iv1, i64 0, i64 4
+    %0 = bitcast ptr %scevgep to ptr
     br i1 %tmp29, label %bb30, label %bb7
 
   bb30:                                             ; preds = %bb7
@@ -56,7 +56,7 @@
 
   bb33:                                             ; preds = %bb30
     call void @fun()
-    store i32 %tmp26, i32* @globvar
+    store i32 %tmp26, ptr @globvar
     %tmp34 = icmp ugt i32 undef, 1
     br label %bb6
 
@@ -71,7 +71,7 @@
   }
 
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #1
+  declare void @llvm.stackprotector(ptr, ptr) #1
 
   attributes #0 = { "target-cpu"="z15" }
   attributes #1 = { nounwind }
@@ -134,7 +134,7 @@ body:             |
     %16:grx32bit = COPY %28.subreg_l32
     %16:grx32bit = LOCHIMux %16, 0, 14, 12, implicit $cc
     %17:grx32bit = SELRMux %27, %28.subreg_l32, 14, 2, implicit killed $cc
-    %18:gr32bit = LMux undef %19:addr64bit, 0, $noreg :: (load (s32) from `i32* undef`)
+    %18:gr32bit = LMux undef %19:addr64bit, 0, $noreg :: (load (s32) from `ptr undef`)
     %20:grx32bit = COPY %28.subreg_l32
     %20:grx32bit = OILMux %20, 3, implicit-def dead $cc
     CR undef %21:gr32bit, %18, implicit-def $cc

diff  --git a/llvm/test/CodeGen/SystemZ/cond-move-regalloc-hints-02.mir b/llvm/test/CodeGen/SystemZ/cond-move-regalloc-hints-02.mir
index 556d2d70d4434..37e29800fb1d6 100644
--- a/llvm/test/CodeGen/SystemZ/cond-move-regalloc-hints-02.mir
+++ b/llvm/test/CodeGen/SystemZ/cond-move-regalloc-hints-02.mir
@@ -5,7 +5,7 @@
 
 
 --- |
-  define i32 @fun(i32 %arg, i32 %arg1, i32 %arg2, i32* %arg3) { ret i32 0 }
+  define i32 @fun(i32 %arg, i32 %arg1, i32 %arg2, ptr %arg3) { ret i32 0 }
   declare void @foo(i32)
 ...
 # CHECK-LABEL: fun
@@ -39,7 +39,7 @@ body:             |
     %9:grx32bit = LHIMux 66
   
   bb.1:
-    %6:grx32bit = LLCMux undef %7:addr64bit, 0, $noreg :: (load (s8) from `i8* undef`)
+    %6:grx32bit = LLCMux undef %7:addr64bit, 0, $noreg :: (load (s8) from `ptr undef`)
     CHIMux %6, 1, implicit-def $cc
     %11:gr32bit = SELRMux %8, %9:grx32bit, 14, 6, implicit killed $cc
     CHIMux %6, 2, implicit-def $cc

diff  --git a/llvm/test/CodeGen/SystemZ/cond-move-regalloc-hints.mir b/llvm/test/CodeGen/SystemZ/cond-move-regalloc-hints.mir
index 686bbc396a5d9..e7e1eaf8f8fdc 100644
--- a/llvm/test/CodeGen/SystemZ/cond-move-regalloc-hints.mir
+++ b/llvm/test/CodeGen/SystemZ/cond-move-regalloc-hints.mir
@@ -10,13 +10,13 @@
   target datalayout = "E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-v128:64-a:8:16-n32:64"
   target triple = "s390x-unknown-linux-gnu"
   
-  %0 = type { i32, i32, i32, float, float, float, i8*, i32 }
+  %0 = type { i32, i32, i32, float, float, float, ptr, i32 }
   
   define void @fun(i32 signext %arg, i32 zeroext %arg1) #0 {
   bb:
     %tmp = sext i32 %arg to i64
-    %tmp2 = tail call i8* @sre_malloc()
-    %tmp4 = tail call i8* @sre_malloc()
+    %tmp2 = tail call ptr @sre_malloc()
+    %tmp4 = tail call ptr @sre_malloc()
     %tmp6 = add i32 %arg, -1
     tail call void @malloc()
     %0 = trunc i32 %arg to i2
@@ -36,29 +36,29 @@
     %tmp15 = phi i64 [ 0, %bb8 ], [ %tmp19, %bb39 ]
     %tmp17 = phi i32 [ %tmp6, %bb8 ], [ %tmp35, %bb39 ]
     %tmp18 = phi i32 [ 0, %bb8 ], [ %tmp34, %bb39 ]
-    %2 = bitcast i8* %tmp2 to float**
+    %2 = bitcast ptr %tmp2 to ptr
     %tmp19 = add nuw nsw i64 %tmp15, 1
     %3 = zext i2 %lsr.iv8 to i64
     %4 = mul i64 %3, -1
-    %tmp21 = getelementptr inbounds float*, float** %2, i64 %tmp15
-    %tmp22 = load float*, float** %tmp21
+    %tmp21 = getelementptr inbounds ptr, ptr %2, i64 %tmp15
+    %tmp22 = load ptr, ptr %tmp21
     %tmp23 = trunc i64 %tmp15 to i32
-    %scevgep = getelementptr float, float* %tmp22, i64 %tmp15
+    %scevgep = getelementptr float, ptr %tmp22, i64 %tmp15
     br label %bb25
   
   bb25:                                             ; preds = %bb25, %bb14
     %lsr.iv10 = phi i64 [ %lsr.iv.next11, %bb25 ], [ %4, %bb14 ]
-    %lsr.iv3 = phi float* [ %scevgep4, %bb25 ], [ %scevgep, %bb14 ]
+    %lsr.iv3 = phi ptr [ %scevgep4, %bb25 ], [ %scevgep, %bb14 ]
     %lsr.iv1 = phi i32 [ %lsr.iv.next2, %bb25 ], [ %lsr.iv, %bb14 ]
     %tmp27 = phi i32 [ %tmp35, %bb25 ], [ %tmp17, %bb14 ]
     %tmp28 = phi i32 [ %tmp34, %bb25 ], [ %tmp18, %bb14 ]
-    %scevgep5 = getelementptr float, float* %lsr.iv3, i64 1
-    %tmp31 = load float, float* %scevgep5
+    %scevgep5 = getelementptr float, ptr %lsr.iv3, i64 1
+    %tmp31 = load float, ptr %scevgep5
     %tmp32 = fcmp olt float %tmp31, undef
     %tmp34 = select i1 %tmp32, i32 %lsr.iv1, i32 %tmp28
     %tmp35 = select i1 %tmp32, i32 %tmp23, i32 %tmp27
     %lsr.iv.next2 = add i32 %lsr.iv1, 1
-    %scevgep4 = getelementptr float, float* %lsr.iv3, i64 1
+    %scevgep4 = getelementptr float, ptr %lsr.iv3, i64 1
     %lsr.iv.next11 = add i64 %lsr.iv10, 1
     %tmp38 = icmp eq i64 %lsr.iv.next11, 0
     br i1 %tmp38, label %bb39, label %bb25
@@ -70,38 +70,38 @@
     br i1 %tmp41, label %bb42, label %bb14
   
   bb42:                                             ; preds = %bb39
-    %5 = bitcast i8* %tmp4 to i32*
-    %tmp43 = getelementptr inbounds i32, i32* %5, i64 undef
-    %tmp44 = load i32, i32* %tmp43
+    %5 = bitcast ptr %tmp4 to ptr
+    %tmp43 = getelementptr inbounds i32, ptr %5, i64 undef
+    %tmp44 = load i32, ptr %tmp43
     %tmp45 = sub nsw i32 %tmp44, %arg
     %tmp46 = sext i32 %tmp45 to i64
-    %tmp47 = getelementptr inbounds %0, %0* null, i64 %tmp46, i32 7
-    %tmp48 = load i32, i32* %tmp47
+    %tmp47 = getelementptr inbounds %0, ptr null, i64 %tmp46, i32 7
+    %tmp48 = load i32, ptr %tmp47
     %tmp49 = add nsw i32 0, %tmp48
-    store i32 %tmp49, i32* undef
+    store i32 %tmp49, ptr undef
     %cond = icmp eq i32 %arg1, 0
     br i1 %cond, label %bb52, label %bb54
   
   bb52:                                             ; preds = %bb42
-    %tmp5312 = bitcast float* undef to float*
+    %tmp5312 = bitcast ptr undef to ptr
     br label %bb54
   
   bb54:                                             ; preds = %bb42, %bb52
-    %6 = bitcast i8* %tmp4 to i32*
+    %6 = bitcast ptr %tmp4 to ptr
     %tmp55 = add i32 0, %arg
-    %tmp56 = getelementptr inbounds i32, i32* %6, i64 undef
-    store i32 %tmp55, i32* %tmp56
+    %tmp56 = getelementptr inbounds i32, ptr %6, i64 undef
+    store i32 %tmp55, ptr %tmp56
     %tmp57 = add i64 %tmp9, 1
     %lsr.iv.next7 = add i2 %lsr.iv6, -1
     br label %bb8
   }
   
-  declare i8* @sre_malloc() #0
+  declare ptr @sre_malloc() #0
   
   declare void @malloc() #0
   
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #1
+  declare void @llvm.stackprotector(ptr, ptr) #1
   
   attributes #0 = { "target-cpu"="z13" }
   attributes #1 = { nounwind }
@@ -263,7 +263,7 @@ body:             |
     %50:gr32bit = nsw SR %50, %0.subreg_l32, implicit-def dead $cc
     %52:addr64bit = LGFR %50
     %52:addr64bit = MGHI %52, 40
-    MVC undef %53:addr64bit, 0, 4, %52, 32 :: (store (s32) into `i32* undef`), (load (s32) from %ir.tmp47)
+    MVC undef %53:addr64bit, 0, 4, %52, 32 :: (store (s32) into `ptr undef`), (load (s32) from %ir.tmp47)
     CHIMux %32.subreg_l32, 0, implicit-def $cc
     BRC 14, 6, %bb.7, implicit killed $cc
     J %bb.6

diff  --git a/llvm/test/CodeGen/SystemZ/dag-combine-02.ll b/llvm/test/CodeGen/SystemZ/dag-combine-02.ll
index 6786e2883da84..fd6b5c7a12b2e 100644
--- a/llvm/test/CodeGen/SystemZ/dag-combine-02.ll
+++ b/llvm/test/CodeGen/SystemZ/dag-combine-02.ll
@@ -4,8 +4,8 @@
 ; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 -O3 | FileCheck %s
 
 @g_56 = external hidden unnamed_addr global i64, align 8
-@func_22.l_91 = external hidden unnamed_addr constant [4 x [7 x i16*]], align 8
-@g_102 = external hidden unnamed_addr global i16**, align 8
+@func_22.l_91 = external hidden unnamed_addr constant [4 x [7 x ptr]], align 8
+@g_102 = external hidden unnamed_addr global ptr, align 8
 @.str = external hidden unnamed_addr constant [2 x i8], align 2
 @.str.1 = external hidden unnamed_addr constant [15 x i8], align 2
 @crc32_context = external hidden unnamed_addr global i32, align 4
@@ -15,24 +15,24 @@
 @g_181.0.4.5 = external hidden unnamed_addr global i1, align 2
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #0
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #0
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1) #0
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1) #0
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #0
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #0
 
 ; Function Attrs: nounwind
-define signext i32 @main(i32 signext, i8** nocapture readonly) local_unnamed_addr #1 {
-  %3 = alloca [4 x [7 x i16*]], align 8
+define signext i32 @main(i32 signext, ptr nocapture readonly) local_unnamed_addr #1 {
+  %3 = alloca [4 x [7 x ptr]], align 8
   %4 = icmp eq i32 %0, 2
   br i1 %4, label %5, label %11
 
 ; <label>:5:                                      ; preds = %2
-  %6 = getelementptr inbounds i8*, i8** %1, i64 1
-  %7 = load i8*, i8** %6, align 8
-  %8 = tail call signext i32 @strcmp(i8* %7, i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str, i64 0, i64 0)) #4
+  %6 = getelementptr inbounds ptr, ptr %1, i64 1
+  %7 = load ptr, ptr %6, align 8
+  %8 = tail call signext i32 @strcmp(ptr %7, ptr @.str) #4
   %9 = icmp eq i32 %8, 0
   %10 = zext i1 %9 to i32
   br label %11
@@ -84,9 +84,9 @@ define signext i32 @main(i32 signext, i8** nocapture readonly) local_unnamed_add
   %53 = lshr <4 x i32> %50, <i32 1, i32 1, i32 1, i32 1>
   %54 = xor <4 x i32> %53, <i32 -306674912, i32 -306674912, i32 -306674912, i32 -306674912>
   %55 = select <4 x i1> %52, <4 x i32> %53, <4 x i32> %54
-  %56 = getelementptr inbounds [256 x i32], [256 x i32]* @crc32_tab, i64 0, i64 %14
-  %57 = bitcast i32* %56 to <4 x i32>*
-  store <4 x i32> %55, <4 x i32>* %57, align 4
+  %56 = getelementptr inbounds [256 x i32], ptr @crc32_tab, i64 0, i64 %14
+  %57 = bitcast ptr %56 to ptr
+  store <4 x i32> %55, ptr %57, align 4
   %58 = add i64 %14, 4
   %59 = add <4 x i32> %15, <i32 4, i32 4, i32 4, i32 4>
   %60 = icmp eq i64 %58, 256
@@ -97,96 +97,96 @@ define signext i32 @main(i32 signext, i8** nocapture readonly) local_unnamed_add
 ; CHECK: stgrl   %r0, g_56
 ; CHECK: llhrl   %r0, g_56+6
 ; CHECK: stgrl   %r2, g_56
-  store i64 0, i64* @g_56, align 8
-  %62 = bitcast [4 x [7 x i16*]]* %3 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 224, i8* nonnull %62) #5
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 nonnull %62, i8* align 8 bitcast ([4 x [7 x i16*]]* @func_22.l_91 to i8*), i64 224, i1 false) #5
-  %63 = getelementptr inbounds [4 x [7 x i16*]], [4 x [7 x i16*]]* %3, i64 0, i64 0, i64 2
-  store i16** %63, i16*** @g_102, align 8
-  %64 = load i64, i64* @g_56, align 8
-  store i64 2, i64* @g_56, align 8
+  store i64 0, ptr @g_56, align 8
+  %62 = bitcast ptr %3 to ptr
+  call void @llvm.lifetime.start.p0(i64 224, ptr nonnull %62) #5
+  call void @llvm.memcpy.p0.p0.i64(ptr align 8 nonnull %62, ptr align 8 @func_22.l_91, i64 224, i1 false) #5
+  %63 = getelementptr inbounds [4 x [7 x ptr]], ptr %3, i64 0, i64 0, i64 2
+  store ptr %63, ptr @g_102, align 8
+  %64 = load i64, ptr @g_56, align 8
+  store i64 2, ptr @g_56, align 8
   %65 = and i64 %64, 65535
   %66 = icmp eq i64 %65, 0
   br i1 %66, label %68, label %67
 
 ; <label>:67:                                     ; preds = %61
-  store i1 true, i1* @g_181.0.4.5, align 2
+  store i1 true, ptr @g_181.0.4.5, align 2
   br label %68
 
 ; <label>:68:                                     ; preds = %67, %61
-  call void @llvm.lifetime.end.p0i8(i64 224, i8* nonnull %62) #5
-  %69 = load i1, i1* @g_181.0.4.5, align 2
+  call void @llvm.lifetime.end.p0(i64 224, ptr nonnull %62) #5
+  %69 = load i1, ptr @g_181.0.4.5, align 2
   %70 = select i1 %69, i32 0, i32 72
-  %71 = load i32, i32* @crc32_context, align 4
+  %71 = load i32, ptr @crc32_context, align 4
   %72 = lshr i32 %71, 8
   %73 = and i32 %71, 255
   %74 = xor i32 %73, %70
   %75 = zext i32 %74 to i64
-  %76 = getelementptr inbounds [256 x i32], [256 x i32]* @crc32_tab, i64 0, i64 %75
-  %77 = load i32, i32* %76, align 4
+  %76 = getelementptr inbounds [256 x i32], ptr @crc32_tab, i64 0, i64 %75
+  %77 = load i32, ptr %76, align 4
   %78 = xor i32 %72, %77
   %79 = lshr i32 %78, 8
   %80 = and i32 %78, 255
   %81 = zext i32 %80 to i64
-  %82 = getelementptr inbounds [256 x i32], [256 x i32]* @crc32_tab, i64 0, i64 %81
-  %83 = load i32, i32* %82, align 4
+  %82 = getelementptr inbounds [256 x i32], ptr @crc32_tab, i64 0, i64 %81
+  %83 = load i32, ptr %82, align 4
   %84 = xor i32 %79, %83
   %85 = lshr i32 %84, 8
   %86 = and i32 %84, 255
   %87 = zext i32 %86 to i64
-  %88 = getelementptr inbounds [256 x i32], [256 x i32]* @crc32_tab, i64 0, i64 %87
-  %89 = load i32, i32* %88, align 4
+  %88 = getelementptr inbounds [256 x i32], ptr @crc32_tab, i64 0, i64 %87
+  %89 = load i32, ptr %88, align 4
   %90 = xor i32 %85, %89
   %91 = lshr i32 %90, 8
   %92 = and i32 %90, 255
   %93 = zext i32 %92 to i64
-  %94 = getelementptr inbounds [256 x i32], [256 x i32]* @crc32_tab, i64 0, i64 %93
-  %95 = load i32, i32* %94, align 4
+  %94 = getelementptr inbounds [256 x i32], ptr @crc32_tab, i64 0, i64 %93
+  %95 = load i32, ptr %94, align 4
   %96 = xor i32 %91, %95
   %97 = lshr i32 %96, 8
   %98 = and i32 %96, 255
   %99 = zext i32 %98 to i64
-  %100 = getelementptr inbounds [256 x i32], [256 x i32]* @crc32_tab, i64 0, i64 %99
-  %101 = load i32, i32* %100, align 4
+  %100 = getelementptr inbounds [256 x i32], ptr @crc32_tab, i64 0, i64 %99
+  %101 = load i32, ptr %100, align 4
   %102 = xor i32 %97, %101
   %103 = lshr i32 %102, 8
   %104 = and i32 %102, 255
   %105 = zext i32 %104 to i64
-  %106 = getelementptr inbounds [256 x i32], [256 x i32]* @crc32_tab, i64 0, i64 %105
-  %107 = load i32, i32* %106, align 4
+  %106 = getelementptr inbounds [256 x i32], ptr @crc32_tab, i64 0, i64 %105
+  %107 = load i32, ptr %106, align 4
   %108 = xor i32 %103, %107
   %109 = lshr i32 %108, 8
   %110 = and i32 %108, 255
   %111 = zext i32 %110 to i64
-  %112 = getelementptr inbounds [256 x i32], [256 x i32]* @crc32_tab, i64 0, i64 %111
-  %113 = load i32, i32* %112, align 4
+  %112 = getelementptr inbounds [256 x i32], ptr @crc32_tab, i64 0, i64 %111
+  %113 = load i32, ptr %112, align 4
   %114 = xor i32 %109, %113
   %115 = lshr i32 %114, 8
   %116 = and i32 %114, 255
   %117 = zext i32 %116 to i64
-  %118 = getelementptr inbounds [256 x i32], [256 x i32]* @crc32_tab, i64 0, i64 %117
-  %119 = load i32, i32* %118, align 4
+  %118 = getelementptr inbounds [256 x i32], ptr @crc32_tab, i64 0, i64 %117
+  %119 = load i32, ptr %118, align 4
   %120 = xor i32 %115, %119
-  store i32 %120, i32* @crc32_context, align 4
+  store i32 %120, ptr @crc32_context, align 4
   %121 = icmp eq i32 %12, 0
   br i1 %121, label %127, label %122
 
 ; <label>:122:                                    ; preds = %68
   %123 = xor i32 %120, -1
   %124 = zext i32 %123 to i64
-  %125 = call signext i32 (i8*, ...) @printf(i8* getelementptr inbounds ([36 x i8], [36 x i8]* @.str.2, i64 0, i64 0), i8* getelementptr inbounds ([15 x i8], [15 x i8]* @.str.1, i64 0, i64 0), i64 %124) #5
-  %126 = load i32, i32* @crc32_context, align 4
+  %125 = call signext i32 (ptr, ...) @printf(ptr @.str.2, ptr @.str.1, i64 %124) #5
+  %126 = load i32, ptr @crc32_context, align 4
   br label %127
 
 ; <label>:127:                                    ; preds = %122, %68
   %128 = phi i32 [ %120, %68 ], [ %126, %122 ]
   %129 = xor i32 %128, -1
-  %130 = call signext i32 (i8*, ...) @printf(i8* getelementptr inbounds ([15 x i8], [15 x i8]* @.str.3, i64 0, i64 0), i32 zeroext %129) #5
+  %130 = call signext i32 (ptr, ...) @printf(ptr @.str.3, i32 zeroext %129) #5
   ret i32 0
 }
 
 ; Function Attrs: nounwind readonly
-declare signext i32 @strcmp(i8* nocapture, i8* nocapture) local_unnamed_addr #2
+declare signext i32 @strcmp(ptr nocapture, ptr nocapture) local_unnamed_addr #2
 
 ; Function Attrs: nounwind
-declare signext i32 @printf(i8* nocapture readonly, ...) local_unnamed_addr #3
+declare signext i32 @printf(ptr nocapture readonly, ...) local_unnamed_addr #3

diff  --git a/llvm/test/CodeGen/SystemZ/debuginstr-00.mir b/llvm/test/CodeGen/SystemZ/debuginstr-00.mir
index 25cda188689ba..885b2a69bbf53 100644
--- a/llvm/test/CodeGen/SystemZ/debuginstr-00.mir
+++ b/llvm/test/CodeGen/SystemZ/debuginstr-00.mir
@@ -29,7 +29,7 @@
   
   declare void @llvm.dbg.value(metadata, i64, metadata, metadata) #1
   
-  declare void @llvm.stackprotector(i8*, i8**) #2
+  declare void @llvm.stackprotector(ptr, ptr) #2
   
   attributes #0 = { "target-cpu"="z13" "use-soft-float"="false" }
   attributes #1 = { nounwind readnone speculatable "target-cpu"="z13" }

diff  --git a/llvm/test/CodeGen/SystemZ/debuginstr-01.mir b/llvm/test/CodeGen/SystemZ/debuginstr-01.mir
index ac86889d3caff..9443b8e078118 100644
--- a/llvm/test/CodeGen/SystemZ/debuginstr-01.mir
+++ b/llvm/test/CodeGen/SystemZ/debuginstr-01.mir
@@ -8,9 +8,9 @@
 
 --- |
   
-  define i32 @f1(i32* %ptr) #1 {
+  define i32 @f1(ptr %ptr) #1 {
   entry:
-    %val = load i32, i32* %ptr
+    %val = load i32, ptr %ptr
     ; Keep the dbg metadata live by referencing it in the IR.
     call void @llvm.dbg.value(metadata i32 0, i64 0, metadata !7, metadata !DIExpression()), !dbg !9
     %cmp = icmp eq i32 %val, 0
@@ -27,7 +27,7 @@
   declare void @llvm.trap() #0
   declare void @llvm.dbg.value(metadata, i64, metadata, metadata)
   
-  declare void @llvm.stackprotector(i8*, i8**) #2
+  declare void @llvm.stackprotector(ptr, ptr) #2
 
   attributes #0 = { cold noreturn nounwind "target-cpu"="zEC12" }
   attributes #1 = { "target-cpu"="zEC12" }

diff  --git a/llvm/test/CodeGen/SystemZ/debuginstr-cgp.mir b/llvm/test/CodeGen/SystemZ/debuginstr-cgp.mir
index 37a1bd776fd84..bc7e016e1c144 100644
--- a/llvm/test/CodeGen/SystemZ/debuginstr-cgp.mir
+++ b/llvm/test/CodeGen/SystemZ/debuginstr-cgp.mir
@@ -90,19 +90,19 @@
 
 --- |
   
-  %0 = type { i32 (...)**, i16, %1* }
-  %1 = type { i32 (...)** }
-  %2 = type { i32 (...)**, %1*, i8, i32, i32, i32, i16, i32, i16, i32, i16*, %3*, %6*, %9 }
+  %0 = type { ptr, i16, ptr }
+  %1 = type { ptr }
+  %2 = type { ptr, ptr, i8, i32, i32, i32, i16, i32, i16, i32, ptr, ptr, ptr, %9 }
   %3 = type { %4 }
-  %4 = type { i32 (...)**, i8, i32, i32, %5**, %1* }
+  %4 = type { ptr, i8, i32, i32, ptr, ptr }
   %5 = type { i32, i32 }
-  %6 = type { %7*, %0*, %0*, %0*, %0*, %0*, %0*, %0*, %0*, %0*, %0*, %0*, %0*, %0*, %0*, %1* }
+  %6 = type { ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr }
   %7 = type { %8 }
-  %8 = type { i32 (...)**, i8, i32, i32, %0**, %1* }
-  %9 = type { i8* }
-  %10 = type { %0, i32, i32, %0* }
+  %8 = type { ptr, i8, i32, i32, ptr, ptr }
+  %9 = type { ptr }
+  %10 = type { %0, i32, i32, ptr }
   
-  define %0* @Fun(%2* %arg) #0 !dbg !7 {
+  define ptr @Fun(ptr %arg) #0 !dbg !7 {
   bb:
     switch i32 undef, label %bb3 [
       i32 58, label %bb1
@@ -113,22 +113,22 @@
     br label %bb4, !dbg !15
   
   bb2:                                              ; preds = %bb
-    %tmp = tail call %10* @hoge(%6* undef, %0* undef, i32 signext 0, i32 signext 0), !dbg !16
-    call void @llvm.dbg.value(metadata %10* %tmp, metadata !10, metadata !DIExpression()), !dbg !16
+    %tmp = tail call ptr @hoge(ptr undef, ptr undef, i32 signext 0, i32 signext 0), !dbg !16
+    call void @llvm.dbg.value(metadata ptr %tmp, metadata !10, metadata !DIExpression()), !dbg !16
     br label %bb4, !dbg !17
   
   bb3:                                              ; preds = %bb
     unreachable, !dbg !18
   
   bb4:                                              ; preds = %bb2, %bb1
-    %tmp5 = phi %10* [ undef, %bb1 ], [ %tmp, %bb2 ], !dbg !19
-    call void @llvm.dbg.value(metadata %10* %tmp5, metadata !12, metadata !DIExpression()), !dbg !19
-    %tmp6 = bitcast %10* %tmp5 to %0*, !dbg !20
-    call void @llvm.dbg.value(metadata %0* %tmp6, metadata !13, metadata !DIExpression()), !dbg !20
-    ret %0* %tmp6, !dbg !21
+    %tmp5 = phi ptr [ undef, %bb1 ], [ %tmp, %bb2 ], !dbg !19
+    call void @llvm.dbg.value(metadata ptr %tmp5, metadata !12, metadata !DIExpression()), !dbg !19
+    %tmp6 = bitcast ptr %tmp5 to ptr, !dbg !20
+    call void @llvm.dbg.value(metadata ptr %tmp6, metadata !13, metadata !DIExpression()), !dbg !20
+    ret ptr %tmp6, !dbg !21
   }
   
-  declare %10* @hoge(%6*, %0*, i32, i32) #0
+  declare ptr @hoge(ptr, ptr, i32, i32) #0
   
   ; Function Attrs: nounwind readnone speculatable
   declare void @llvm.dbg.value(metadata, metadata, metadata) #1

diff  --git a/llvm/test/CodeGen/SystemZ/foldmemop-imm-02.mir b/llvm/test/CodeGen/SystemZ/foldmemop-imm-02.mir
index 92f176db0ae64..653bb42e1cad2 100644
--- a/llvm/test/CodeGen/SystemZ/foldmemop-imm-02.mir
+++ b/llvm/test/CodeGen/SystemZ/foldmemop-imm-02.mir
@@ -4,10 +4,10 @@
 # Test folding of a memory operand into logical compare with an immediate.
 
 --- |
-  define i32 @fun0(i32* %src, i32 %arg) { ret i32 0 }
-  define i64 @fun1(i64* %src, i64 %arg) { ret i64 0 }
-  define i32 @fun2(i32* %src, i32 %arg) { ret i32 0 }
-  define i64 @fun3(i64* %src, i64 %arg) { ret i64 0 }
+  define i32 @fun0(ptr %src, i32 %arg) { ret i32 0 }
+  define i64 @fun1(ptr %src, i64 %arg) { ret i64 0 }
+  define i32 @fun2(ptr %src, i32 %arg) { ret i32 0 }
+  define i64 @fun3(ptr %src, i64 %arg) { ret i64 0 }
 ...
 
 

diff  --git a/llvm/test/CodeGen/SystemZ/foldmemop-msc.mir b/llvm/test/CodeGen/SystemZ/foldmemop-msc.mir
index 8d67e6cfef1b3..65429c589dc7a 100644
--- a/llvm/test/CodeGen/SystemZ/foldmemop-msc.mir
+++ b/llvm/test/CodeGen/SystemZ/foldmemop-msc.mir
@@ -4,10 +4,10 @@
 # Test folding of a memory operand into logical compare with an immediate.
 
 --- |
-  define i32 @fun0(i32* %src, i32 %arg) { ret i32 0 }
-  define i64 @fun1(i64* %src, i64 %arg) { ret i64 0 }
-  define i32 @fun2(i32* %src, i32 %arg) { ret i32 0 }
-  define i64 @fun3(i64* %src, i64 %arg) { ret i64 0 }
+  define i32 @fun0(ptr %src, i32 %arg) { ret i32 0 }
+  define i64 @fun1(ptr %src, i64 %arg) { ret i64 0 }
+  define i32 @fun2(ptr %src, i32 %arg) { ret i32 0 }
+  define i64 @fun3(ptr %src, i64 %arg) { ret i64 0 }
 ...
 
 

diff  --git a/llvm/test/CodeGen/SystemZ/foldmemop-vec-binops.mir b/llvm/test/CodeGen/SystemZ/foldmemop-vec-binops.mir
index 6ef9de775fb01..c0be090568dd9 100644
--- a/llvm/test/CodeGen/SystemZ/foldmemop-vec-binops.mir
+++ b/llvm/test/CodeGen/SystemZ/foldmemop-vec-binops.mir
@@ -4,30 +4,30 @@
 # Test folding of a memory operand into an fp memory instruction.
 
 --- |
-  define void @fun0(double %arg0, double %arg1, double* %Dst) { ret void }
-  define void @fun1(double %arg0, double %arg1, double* %Dst) { ret void }
-  define void @fun2(double %arg0, double %arg1, double* %Dst) { ret void }
-  define void @fun3(float %arg0, float %arg1, float* %Dst) { ret void }
-  define void @fun4(float %arg0, float %arg1, float* %Dst) { ret void }
-  define void @fun5(float %arg0, float %arg1, float* %Dst) { ret void }
-  define void @fun6(double %arg0, double %arg1, double* %Dst) { ret void }
-  define void @fun7(double %arg0, double %arg1, double* %Dst) { ret void }
-  define void @fun8(double %arg0, double %arg1, double* %Dst) { ret void }
-  define void @fun9(float %arg0, float %arg1, float* %Dst) { ret void }
-  define void @fun10(float %arg0, float %arg1, float* %Dst) { ret void }
-  define void @fun11(float %arg0, float %arg1, float* %Dst) { ret void }
-  define void @fun12(double %arg0, double %arg1, double* %Dst) { ret void }
-  define void @fun13(double %arg0, double %arg1, double* %Dst) { ret void }
-  define void @fun14(double %arg0, double %arg1, double* %Dst) { ret void }
-  define void @fun15(float %arg0, float %arg1, float* %Dst) { ret void }
-  define void @fun16(float %arg0, float %arg1, float* %Dst) { ret void }
-  define void @fun17(float %arg0, float %arg1, float* %Dst) { ret void }
-  define void @fun18(double %arg0, double %arg1, double* %Dst) { ret void }
-  define void @fun19(double %arg0, double %arg1, double* %Dst) { ret void }
-  define void @fun20(double %arg0, double %arg1, double* %Dst) { ret void }
-  define void @fun21(float %arg0, float %arg1, float* %Dst) { ret void }
-  define void @fun22(float %arg0, float %arg1, float* %Dst) { ret void }
-  define void @fun23(float %arg0, float %arg1, float* %Dst) { ret void }
+  define void @fun0(double %arg0, double %arg1, ptr %Dst) { ret void }
+  define void @fun1(double %arg0, double %arg1, ptr %Dst) { ret void }
+  define void @fun2(double %arg0, double %arg1, ptr %Dst) { ret void }
+  define void @fun3(float %arg0, float %arg1, ptr %Dst) { ret void }
+  define void @fun4(float %arg0, float %arg1, ptr %Dst) { ret void }
+  define void @fun5(float %arg0, float %arg1, ptr %Dst) { ret void }
+  define void @fun6(double %arg0, double %arg1, ptr %Dst) { ret void }
+  define void @fun7(double %arg0, double %arg1, ptr %Dst) { ret void }
+  define void @fun8(double %arg0, double %arg1, ptr %Dst) { ret void }
+  define void @fun9(float %arg0, float %arg1, ptr %Dst) { ret void }
+  define void @fun10(float %arg0, float %arg1, ptr %Dst) { ret void }
+  define void @fun11(float %arg0, float %arg1, ptr %Dst) { ret void }
+  define void @fun12(double %arg0, double %arg1, ptr %Dst) { ret void }
+  define void @fun13(double %arg0, double %arg1, ptr %Dst) { ret void }
+  define void @fun14(double %arg0, double %arg1, ptr %Dst) { ret void }
+  define void @fun15(float %arg0, float %arg1, ptr %Dst) { ret void }
+  define void @fun16(float %arg0, float %arg1, ptr %Dst) { ret void }
+  define void @fun17(float %arg0, float %arg1, ptr %Dst) { ret void }
+  define void @fun18(double %arg0, double %arg1, ptr %Dst) { ret void }
+  define void @fun19(double %arg0, double %arg1, ptr %Dst) { ret void }
+  define void @fun20(double %arg0, double %arg1, ptr %Dst) { ret void }
+  define void @fun21(float %arg0, float %arg1, ptr %Dst) { ret void }
+  define void @fun22(float %arg0, float %arg1, ptr %Dst) { ret void }
+  define void @fun23(float %arg0, float %arg1, ptr %Dst) { ret void }
 
 ...
 

diff  --git a/llvm/test/CodeGen/SystemZ/foldmemop-vec-cc.mir b/llvm/test/CodeGen/SystemZ/foldmemop-vec-cc.mir
index 41716991dddf4..dbdd3a0a21bf1 100644
--- a/llvm/test/CodeGen/SystemZ/foldmemop-vec-cc.mir
+++ b/llvm/test/CodeGen/SystemZ/foldmemop-vec-cc.mir
@@ -4,7 +4,7 @@
 # Test that folding does not occur if it would introduce a clobbering of a live CC.
 
 --- |
-  define void @fun0(double %arg0, double %arg1, double* %Dst) { ret void }
+  define void @fun0(double %arg0, double %arg1, ptr %Dst) { ret void }
 
 ...
 

diff  --git a/llvm/test/CodeGen/SystemZ/foldmemop-vec-cmp.mir b/llvm/test/CodeGen/SystemZ/foldmemop-vec-cmp.mir
index 1f66c4204e859..1344809651ad7 100644
--- a/llvm/test/CodeGen/SystemZ/foldmemop-vec-cmp.mir
+++ b/llvm/test/CodeGen/SystemZ/foldmemop-vec-cmp.mir
@@ -4,19 +4,19 @@
 # Test folding of a memory operand into an fp memory instruction.
 
 --- |
-  define void @fun0(double %arg0, double %arg1, double* %Dst) { ret void }
-  define void @fun1(double %arg0, double %arg1, double* %Dst) { ret void }
-  define void @fun2(double %arg0, double %arg1, double* %Dst) { ret void }
-  define void @fun3(float %arg0, float %arg1, float* %Dst) { ret void }
-  define void @fun4(float %arg0, float %arg1, float* %Dst) { ret void }
-  define void @fun5(float %arg0, float %arg1, float* %Dst) { ret void }
-  define void @fun6(double %arg0, double %arg1, double* %Dst) { ret void }
-  define void @fun7(double %arg0, double %arg1, double* %Dst) { ret void }
-  define void @fun8(double %arg0, double %arg1, double* %Dst) { ret void }
-  define void @fun9(float %arg0, float %arg1, float* %Dst) { ret void }
-  define void @fun10(float %arg0, float %arg1, float* %Dst) { ret void }
-  define void @fun11(float %arg0, float %arg1, float* %Dst) { ret void }
-  define void @fun12(float %arg0, float %arg1, float* %Dst) { ret void }
+  define void @fun0(double %arg0, double %arg1, ptr %Dst) { ret void }
+  define void @fun1(double %arg0, double %arg1, ptr %Dst) { ret void }
+  define void @fun2(double %arg0, double %arg1, ptr %Dst) { ret void }
+  define void @fun3(float %arg0, float %arg1, ptr %Dst) { ret void }
+  define void @fun4(float %arg0, float %arg1, ptr %Dst) { ret void }
+  define void @fun5(float %arg0, float %arg1, ptr %Dst) { ret void }
+  define void @fun6(double %arg0, double %arg1, ptr %Dst) { ret void }
+  define void @fun7(double %arg0, double %arg1, ptr %Dst) { ret void }
+  define void @fun8(double %arg0, double %arg1, ptr %Dst) { ret void }
+  define void @fun9(float %arg0, float %arg1, ptr %Dst) { ret void }
+  define void @fun10(float %arg0, float %arg1, ptr %Dst) { ret void }
+  define void @fun11(float %arg0, float %arg1, ptr %Dst) { ret void }
+  define void @fun12(float %arg0, float %arg1, ptr %Dst) { ret void }
 
 ...
 

diff  --git a/llvm/test/CodeGen/SystemZ/foldmemop-vec-fusedfp.mir b/llvm/test/CodeGen/SystemZ/foldmemop-vec-fusedfp.mir
index e71d26830b9c5..9d165a56193ba 100644
--- a/llvm/test/CodeGen/SystemZ/foldmemop-vec-fusedfp.mir
+++ b/llvm/test/CodeGen/SystemZ/foldmemop-vec-fusedfp.mir
@@ -4,22 +4,22 @@
 # Test folding of a memory operand into an fp memory instruction.
 
 --- |
-  define void @fun0(double %arg0, double %arg1, double* %Dst) { ret void }
-  define void @fun1(double %arg0, double %arg1, double* %Dst) { ret void }
-  define void @fun2(double %arg0, double %arg1, double* %Dst) { ret void }
-  define void @fun3(double %arg0, double %arg1, double* %Dst) { ret void }
-  define void @fun4(float %arg0, float %arg1, float* %Dst) { ret void }
-  define void @fun5(float %arg0, float %arg1, float* %Dst) { ret void }
-  define void @fun6(float %arg0, float %arg1, float* %Dst) { ret void }
-  define void @fun7(float %arg0, float %arg1, float* %Dst) { ret void }
-  define void @fun8(double %arg0, double %arg1, double* %Dst) { ret void }
-  define void @fun9(double %arg0, double %arg1, double* %Dst) { ret void }
-  define void @fun10(double %arg0, double %arg1, double* %Dst) { ret void }
-  define void @fun11(double %arg0, double %arg1, double* %Dst) { ret void }
-  define void @fun12(float %arg0, float %arg1, float* %Dst) { ret void }
-  define void @fun13(float %arg0, float %arg1, float* %Dst) { ret void }
-  define void @fun14(float %arg0, float %arg1, float* %Dst) { ret void }
-  define void @fun15(float %arg0, float %arg1, float* %Dst) { ret void }
+  define void @fun0(double %arg0, double %arg1, ptr %Dst) { ret void }
+  define void @fun1(double %arg0, double %arg1, ptr %Dst) { ret void }
+  define void @fun2(double %arg0, double %arg1, ptr %Dst) { ret void }
+  define void @fun3(double %arg0, double %arg1, ptr %Dst) { ret void }
+  define void @fun4(float %arg0, float %arg1, ptr %Dst) { ret void }
+  define void @fun5(float %arg0, float %arg1, ptr %Dst) { ret void }
+  define void @fun6(float %arg0, float %arg1, ptr %Dst) { ret void }
+  define void @fun7(float %arg0, float %arg1, ptr %Dst) { ret void }
+  define void @fun8(double %arg0, double %arg1, ptr %Dst) { ret void }
+  define void @fun9(double %arg0, double %arg1, ptr %Dst) { ret void }
+  define void @fun10(double %arg0, double %arg1, ptr %Dst) { ret void }
+  define void @fun11(double %arg0, double %arg1, ptr %Dst) { ret void }
+  define void @fun12(float %arg0, float %arg1, ptr %Dst) { ret void }
+  define void @fun13(float %arg0, float %arg1, ptr %Dst) { ret void }
+  define void @fun14(float %arg0, float %arg1, ptr %Dst) { ret void }
+  define void @fun15(float %arg0, float %arg1, ptr %Dst) { ret void }
 
 ...
 

diff  --git a/llvm/test/CodeGen/SystemZ/fp-conv-17.mir b/llvm/test/CodeGen/SystemZ/fp-conv-17.mir
index d9ed5303b35ae..f495e827ed9bc 100644
--- a/llvm/test/CodeGen/SystemZ/fp-conv-17.mir
+++ b/llvm/test/CodeGen/SystemZ/fp-conv-17.mir
@@ -1,24 +1,24 @@
 # RUN: llc -mtriple=s390x-linux-gnu -mcpu=z10 -start-before=greedy %s -o - \
 # RUN:   | FileCheck %s
 --- |
-  define void @f0(double* %ptr1, float* %ptr2) {
-    %val0 = load volatile float, float* %ptr2
-    %val1 = load volatile float, float* %ptr2
-    %val2 = load volatile float, float* %ptr2
-    %val3 = load volatile float, float* %ptr2
-    %val4 = load volatile float, float* %ptr2
-    %val5 = load volatile float, float* %ptr2
-    %val6 = load volatile float, float* %ptr2
-    %val7 = load volatile float, float* %ptr2
-    %val8 = load volatile float, float* %ptr2
-    %val9 = load volatile float, float* %ptr2
-    %val10 = load volatile float, float* %ptr2
-    %val11 = load volatile float, float* %ptr2
-    %val12 = load volatile float, float* %ptr2
-    %val13 = load volatile float, float* %ptr2
-    %val14 = load volatile float, float* %ptr2
-    %val15 = load volatile float, float* %ptr2
-    %val16 = load volatile float, float* %ptr2
+  define void @f0(ptr %ptr1, ptr %ptr2) {
+    %val0 = load volatile float, ptr %ptr2
+    %val1 = load volatile float, ptr %ptr2
+    %val2 = load volatile float, ptr %ptr2
+    %val3 = load volatile float, ptr %ptr2
+    %val4 = load volatile float, ptr %ptr2
+    %val5 = load volatile float, ptr %ptr2
+    %val6 = load volatile float, ptr %ptr2
+    %val7 = load volatile float, ptr %ptr2
+    %val8 = load volatile float, ptr %ptr2
+    %val9 = load volatile float, ptr %ptr2
+    %val10 = load volatile float, ptr %ptr2
+    %val11 = load volatile float, ptr %ptr2
+    %val12 = load volatile float, ptr %ptr2
+    %val13 = load volatile float, ptr %ptr2
+    %val14 = load volatile float, ptr %ptr2
+    %val15 = load volatile float, ptr %ptr2
+    %val16 = load volatile float, ptr %ptr2
     %ext0 = fpext float %val0 to double
     %ext1 = fpext float %val1 to double
     %ext2 = fpext float %val2 to double
@@ -36,40 +36,40 @@
     %ext14 = fpext float %val14 to double
     %ext15 = fpext float %val15 to double
     %ext16 = fpext float %val16 to double
-    store volatile float %val0, float* %ptr2
-    store volatile float %val1, float* %ptr2
-    store volatile float %val2, float* %ptr2
-    store volatile float %val3, float* %ptr2
-    store volatile float %val4, float* %ptr2
-    store volatile float %val5, float* %ptr2
-    store volatile float %val6, float* %ptr2
-    store volatile float %val7, float* %ptr2
-    store volatile float %val8, float* %ptr2
-    store volatile float %val9, float* %ptr2
-    store volatile float %val10, float* %ptr2
-    store volatile float %val11, float* %ptr2
-    store volatile float %val12, float* %ptr2
-    store volatile float %val13, float* %ptr2
-    store volatile float %val14, float* %ptr2
-    store volatile float %val15, float* %ptr2
-    store volatile float %val16, float* %ptr2
-    store volatile double %ext0, double* %ptr1
-    store volatile double %ext1, double* %ptr1
-    store volatile double %ext2, double* %ptr1
-    store volatile double %ext3, double* %ptr1
-    store volatile double %ext4, double* %ptr1
-    store volatile double %ext5, double* %ptr1
-    store volatile double %ext6, double* %ptr1
-    store volatile double %ext7, double* %ptr1
-    store volatile double %ext8, double* %ptr1
-    store volatile double %ext9, double* %ptr1
-    store volatile double %ext10, double* %ptr1
-    store volatile double %ext11, double* %ptr1
-    store volatile double %ext12, double* %ptr1
-    store volatile double %ext13, double* %ptr1
-    store volatile double %ext14, double* %ptr1
-    store volatile double %ext15, double* %ptr1
-    store volatile double %ext16, double* %ptr1
+    store volatile float %val0, ptr %ptr2
+    store volatile float %val1, ptr %ptr2
+    store volatile float %val2, ptr %ptr2
+    store volatile float %val3, ptr %ptr2
+    store volatile float %val4, ptr %ptr2
+    store volatile float %val5, ptr %ptr2
+    store volatile float %val6, ptr %ptr2
+    store volatile float %val7, ptr %ptr2
+    store volatile float %val8, ptr %ptr2
+    store volatile float %val9, ptr %ptr2
+    store volatile float %val10, ptr %ptr2
+    store volatile float %val11, ptr %ptr2
+    store volatile float %val12, ptr %ptr2
+    store volatile float %val13, ptr %ptr2
+    store volatile float %val14, ptr %ptr2
+    store volatile float %val15, ptr %ptr2
+    store volatile float %val16, ptr %ptr2
+    store volatile double %ext0, ptr %ptr1
+    store volatile double %ext1, ptr %ptr1
+    store volatile double %ext2, ptr %ptr1
+    store volatile double %ext3, ptr %ptr1
+    store volatile double %ext4, ptr %ptr1
+    store volatile double %ext5, ptr %ptr1
+    store volatile double %ext6, ptr %ptr1
+    store volatile double %ext7, ptr %ptr1
+    store volatile double %ext8, ptr %ptr1
+    store volatile double %ext9, ptr %ptr1
+    store volatile double %ext10, ptr %ptr1
+    store volatile double %ext11, ptr %ptr1
+    store volatile double %ext12, ptr %ptr1
+    store volatile double %ext13, ptr %ptr1
+    store volatile double %ext14, ptr %ptr1
+    store volatile double %ext15, ptr %ptr1
+    store volatile double %ext16, ptr %ptr1
     ret void
   }
   

diff  --git a/llvm/test/CodeGen/SystemZ/frame-26.mir b/llvm/test/CodeGen/SystemZ/frame-26.mir
index 1831253becf64..d37f565bdea79 100644
--- a/llvm/test/CodeGen/SystemZ/frame-26.mir
+++ b/llvm/test/CodeGen/SystemZ/frame-26.mir
@@ -17,16 +17,16 @@
 --- |
   
   @g_181 = external dso_local global i32, align 4
-  @g_1390 = external dso_local constant i64*, align 8
+  @g_1390 = external dso_local constant ptr, align 8
   
-  define internal i8 @fun0(i8 %arg, i8 %arg1, i32 %arg2, i8 %arg3, i32* %arg4, float %F0, float %F1) #0 {
+  define internal i8 @fun0(i8 %arg, i8 %arg1, i32 %arg2, i8 %arg3, ptr %arg4, float %F0, float %F1) #0 {
     ret i8 0
   }
   
   ; Same function but in a single block which will make the verifier complain
   ; if R6 is killed by the original store before the point where the
   ; RegScavenger inserts its (killing) store of R6.
-  define internal i8 @fun1(i8 %arg, i8 %arg1, i32 %arg2, i8 %arg3, i32* %arg4) #0 {
+  define internal i8 @fun1(i8 %arg, i8 %arg1, i32 %arg2, i8 %arg3, ptr %arg4) #0 {
     ret i8 0
   }
   
@@ -75,10 +75,10 @@ body:             |
   bb.0:
     liveins: $f0s, $f2s, $r6d
   
-    STG killed renamable $r6d, undef renamable $r1d, 0, $noreg :: (store (s64) into `i32** undef`)
+    STG killed renamable $r6d, undef renamable $r1d, 0, $noreg :: (store (s64) into `ptr undef`)
     renamable $r0d = LARL @g_181
     nofpexcept CEBR renamable $f0s, renamable $f2s, implicit-def $cc, implicit $fpc
-    STG renamable $r0d, undef renamable $r1d, 0, $noreg :: (store (s64) into `i32** undef`)
+    STG renamable $r0d, undef renamable $r1d, 0, $noreg :: (store (s64) into `ptr undef`)
     BRC 15, 4, %bb.2, implicit killed $cc
   
   bb.1:
@@ -89,36 +89,36 @@ body:             |
   bb.2:
     liveins: $f0s, $r0d
   
-    STE killed renamable $f0s, undef renamable $r1d, 0, $noreg :: (volatile store (s32) into `float* undef`)
+    STE killed renamable $f0s, undef renamable $r1d, 0, $noreg :: (volatile store (s32) into `ptr undef`)
     renamable $r1d = nuw LA %stack.0, 16, $noreg
     renamable $r2d = nuw LA %stack.0, 24, $noreg
     renamable $r3d = LA %stack.0, 40, $noreg
     renamable $r4d = LARL @g_1390
-    STG renamable $r4d, undef renamable $r1d, 0, $noreg :: (store (s64) into `i64*** undef`)
+    STG renamable $r4d, undef renamable $r1d, 0, $noreg :: (store (s64) into `ptr undef`)
     renamable $r5d = nuw LA %stack.0, 48, $noreg
     renamable $r14d = LA %stack.0, 72, $noreg
     renamable $r13d = LA %stack.0, 80, $noreg
     renamable $r12d = LA %stack.0, 56, $noreg
     renamable $r10d = LA %stack.0, 0, $noreg
-    STG renamable $r10d, undef renamable $r1d, 0, $noreg :: (store (s64) into `i64*** undef`)
+    STG renamable $r10d, undef renamable $r1d, 0, $noreg :: (store (s64) into `ptr undef`)
     renamable $r9d = LA %stack.0, 64, $noreg
     renamable $r8d = LA %stack.0, 88, $noreg
     renamable $r7d = nuw LA %stack.0, 8, $noreg
     MVGHI %stack.1, 904, 0
-    STG killed renamable $r9d, $noreg, 0, $noreg :: (store (s64) into `i64*** null`)
-    STG killed renamable $r3d, undef renamable $r1d, 0, $noreg :: (store (s64) into `i64*** undef`)
-    STG killed renamable $r14d, undef renamable $r1d, 0, $noreg :: (store (s64) into `i64*** undef`)
-    STG killed renamable $r7d, undef renamable $r1d, 0, $noreg :: (store (s64) into `i64*** undef`)
-    STG killed renamable $r1d, undef renamable $r1d, 0, $noreg :: (store (s64) into `i64*** undef`)
-    STG killed renamable $r4d, undef renamable $r1d, 0, $noreg :: (store (s64) into `i64*** undef`)
-    STG killed renamable $r2d, undef renamable $r1d, 0, $noreg :: (store (s64) into `i64*** undef`)
-    STG killed renamable $r5d, undef renamable $r1d, 0, $noreg :: (store (s64) into `i64*** undef`)
-    STG killed renamable $r8d, undef renamable $r1d, 0, $noreg :: (store (s64) into `i64*** undef`)
-    STG killed renamable $r12d, undef renamable $r1d, 0, $noreg :: (store (s64) into `i64*** undef`)
-    STG killed renamable $r13d, undef renamable $r1d, 0, $noreg :: (store (s64) into `i64*** undef`)
-    STG killed renamable $r10d, undef renamable $r1d, 0, $noreg :: (store (s64) into `i64*** undef`)
+    STG killed renamable $r9d, $noreg, 0, $noreg :: (store (s64) into `ptr null`)
+    STG killed renamable $r3d, undef renamable $r1d, 0, $noreg :: (store (s64) into `ptr undef`)
+    STG killed renamable $r14d, undef renamable $r1d, 0, $noreg :: (store (s64) into `ptr undef`)
+    STG killed renamable $r7d, undef renamable $r1d, 0, $noreg :: (store (s64) into `ptr undef`)
+    STG killed renamable $r1d, undef renamable $r1d, 0, $noreg :: (store (s64) into `ptr undef`)
+    STG killed renamable $r4d, undef renamable $r1d, 0, $noreg :: (store (s64) into `ptr undef`)
+    STG killed renamable $r2d, undef renamable $r1d, 0, $noreg :: (store (s64) into `ptr undef`)
+    STG killed renamable $r5d, undef renamable $r1d, 0, $noreg :: (store (s64) into `ptr undef`)
+    STG killed renamable $r8d, undef renamable $r1d, 0, $noreg :: (store (s64) into `ptr undef`)
+    STG killed renamable $r12d, undef renamable $r1d, 0, $noreg :: (store (s64) into `ptr undef`)
+    STG killed renamable $r13d, undef renamable $r1d, 0, $noreg :: (store (s64) into `ptr undef`)
+    STG killed renamable $r10d, undef renamable $r1d, 0, $noreg :: (store (s64) into `ptr undef`)
     $r2l = LHI 0
-    STG killed renamable $r0d, undef renamable $r1d, 0, $noreg :: (store (s64) into `i32** undef`)
+    STG killed renamable $r0d, undef renamable $r1d, 0, $noreg :: (store (s64) into `ptr undef`)
     Return implicit $r2l
 
 ...
@@ -162,38 +162,38 @@ body:             |
   bb.0:
     liveins: $r6d
   
-    STG killed renamable $r6d, undef renamable $r1d, 0, $noreg :: (store (s64) into `i32** undef`)
+    STG killed renamable $r6d, undef renamable $r1d, 0, $noreg :: (store (s64) into `ptr undef`)
     renamable $r0d = LARL @g_181
-    STG renamable $r0d, undef renamable $r1d, 0, $noreg :: (store (s64) into `i32** undef`)
+    STG renamable $r0d, undef renamable $r1d, 0, $noreg :: (store (s64) into `ptr undef`)
     renamable $r1d = nuw LA %stack.0, 16, $noreg
     renamable $r2d = nuw LA %stack.0, 24, $noreg
     renamable $r3d = LA %stack.0, 40, $noreg
     renamable $r4d = LARL @g_1390
-    STG renamable $r4d, undef renamable $r1d, 0, $noreg :: (store (s64) into `i64*** undef`)
+    STG renamable $r4d, undef renamable $r1d, 0, $noreg :: (store (s64) into `ptr undef`)
     renamable $r5d = nuw LA %stack.0, 48, $noreg
     renamable $r14d = LA %stack.0, 72, $noreg
     renamable $r13d = LA %stack.0, 80, $noreg
     renamable $r12d = LA %stack.0, 56, $noreg
     renamable $r10d = LA %stack.0, 0, $noreg
-    STG renamable $r10d, undef renamable $r1d, 0, $noreg :: (store (s64) into `i64*** undef`)
+    STG renamable $r10d, undef renamable $r1d, 0, $noreg :: (store (s64) into `ptr undef`)
     renamable $r9d = LA %stack.0, 64, $noreg
     renamable $r8d = LA %stack.0, 88, $noreg
     renamable $r7d = nuw LA %stack.0, 8, $noreg
     MVGHI %stack.1, 904, 0
-    STG killed renamable $r9d, $noreg, 0, $noreg :: (store (s64) into `i64*** null`)
-    STG killed renamable $r3d, undef renamable $r1d, 0, $noreg :: (store (s64) into `i64*** undef`)
-    STG killed renamable $r14d, undef renamable $r1d, 0, $noreg :: (store (s64) into `i64*** undef`)
-    STG killed renamable $r7d, undef renamable $r1d, 0, $noreg :: (store (s64) into `i64*** undef`)
-    STG killed renamable $r1d, undef renamable $r1d, 0, $noreg :: (store (s64) into `i64*** undef`)
-    STG killed renamable $r4d, undef renamable $r1d, 0, $noreg :: (store (s64) into `i64*** undef`)
-    STG killed renamable $r2d, undef renamable $r1d, 0, $noreg :: (store (s64) into `i64*** undef`)
-    STG killed renamable $r5d, undef renamable $r1d, 0, $noreg :: (store (s64) into `i64*** undef`)
-    STG killed renamable $r8d, undef renamable $r1d, 0, $noreg :: (store (s64) into `i64*** undef`)
-    STG killed renamable $r12d, undef renamable $r1d, 0, $noreg :: (store (s64) into `i64*** undef`)
-    STG killed renamable $r13d, undef renamable $r1d, 0, $noreg :: (store (s64) into `i64*** undef`)
-    STG killed renamable $r10d, undef renamable $r1d, 0, $noreg :: (store (s64) into `i64*** undef`)
+    STG killed renamable $r9d, $noreg, 0, $noreg :: (store (s64) into `ptr null`)
+    STG killed renamable $r3d, undef renamable $r1d, 0, $noreg :: (store (s64) into `ptr undef`)
+    STG killed renamable $r14d, undef renamable $r1d, 0, $noreg :: (store (s64) into `ptr undef`)
+    STG killed renamable $r7d, undef renamable $r1d, 0, $noreg :: (store (s64) into `ptr undef`)
+    STG killed renamable $r1d, undef renamable $r1d, 0, $noreg :: (store (s64) into `ptr undef`)
+    STG killed renamable $r4d, undef renamable $r1d, 0, $noreg :: (store (s64) into `ptr undef`)
+    STG killed renamable $r2d, undef renamable $r1d, 0, $noreg :: (store (s64) into `ptr undef`)
+    STG killed renamable $r5d, undef renamable $r1d, 0, $noreg :: (store (s64) into `ptr undef`)
+    STG killed renamable $r8d, undef renamable $r1d, 0, $noreg :: (store (s64) into `ptr undef`)
+    STG killed renamable $r12d, undef renamable $r1d, 0, $noreg :: (store (s64) into `ptr undef`)
+    STG killed renamable $r13d, undef renamable $r1d, 0, $noreg :: (store (s64) into `ptr undef`)
+    STG killed renamable $r10d, undef renamable $r1d, 0, $noreg :: (store (s64) into `ptr undef`)
     $r2l = LHI 0
-    STG killed renamable $r0d, undef renamable $r1d, 0, $noreg :: (store (s64) into `i32** undef`)
+    STG killed renamable $r0d, undef renamable $r1d, 0, $noreg :: (store (s64) into `ptr undef`)
     Return implicit $r2l
 
 ...

diff  --git a/llvm/test/CodeGen/SystemZ/int-cmp-56.mir b/llvm/test/CodeGen/SystemZ/int-cmp-56.mir
index 8038544f4e670..e52fd44ae47db 100644
--- a/llvm/test/CodeGen/SystemZ/int-cmp-56.mir
+++ b/llvm/test/CodeGen/SystemZ/int-cmp-56.mir
@@ -6,12 +6,12 @@
 
 --- |
   declare i64 @foo()
-  define i64 @fun1(i64* %ptr0)  { ret i64 0 }
-  define i64 @fun2(i64* %ptr0)  { ret i64 0 }
+  define i64 @fun1(ptr %ptr0)  { ret i64 0 }
+  define i64 @fun2(ptr %ptr0)  { ret i64 0 }
 
   declare i32 @foo32()
-  define i32 @fun3(i32* %ptr0)  { ret i32 0 }
-  define i32 @fun4(i32* %ptr0)  { ret i32 0 }
+  define i32 @fun3(ptr %ptr0)  { ret i32 0 }
+  define i32 @fun4(ptr %ptr0)  { ret i32 0 }
 ...
 
 

diff  --git a/llvm/test/CodeGen/SystemZ/isel-debug.ll b/llvm/test/CodeGen/SystemZ/isel-debug.ll
index b867dac692879..120a0e08ae9fa 100644
--- a/llvm/test/CodeGen/SystemZ/isel-debug.ll
+++ b/llvm/test/CodeGen/SystemZ/isel-debug.ll
@@ -9,11 +9,11 @@
 ; CHECK: Index
 ; CHECK: Disp
 
-define void @fun(i64* %ptr) {
+define void @fun(ptr %ptr) {
 entry:
-  %0 = bitcast i64* %ptr to i32**
-  %1 = load i32*, i32** %0, align 8
-  %xpv_pv = getelementptr inbounds i32, i32* %1
-  store i32 0, i32* %xpv_pv
+  %0 = bitcast ptr %ptr to ptr
+  %1 = load ptr, ptr %0, align 8
+  %xpv_pv = getelementptr inbounds i32, ptr %1
+  store i32 0, ptr %xpv_pv
   ret void
 }

diff  --git a/llvm/test/CodeGen/SystemZ/load-and-test-RA-hints.mir b/llvm/test/CodeGen/SystemZ/load-and-test-RA-hints.mir
index 3c10bb5983733..17988fdf6e6a1 100644
--- a/llvm/test/CodeGen/SystemZ/load-and-test-RA-hints.mir
+++ b/llvm/test/CodeGen/SystemZ/load-and-test-RA-hints.mir
@@ -25,20 +25,20 @@
     br label %bb20
   
   bb1:                                              ; preds = %bb1.preheader, %bb15
-    %lsr.iv3 = phi [512 x i32]* [ undef, %bb1.preheader ], [ %2, %bb15 ]
-    %lsr.iv1 = phi [300 x i32]* [ @rootlosers, %bb1.preheader ], [ %1, %bb15 ]
+    %lsr.iv3 = phi ptr [ undef, %bb1.preheader ], [ %2, %bb15 ]
+    %lsr.iv1 = phi ptr [ @rootlosers, %bb1.preheader ], [ %1, %bb15 ]
     %lsr.iv = phi i32 [ 0, %bb1.preheader ], [ %lsr.iv.next, %bb15 ]
     %tmp2 = phi i32 [ %tmp18, %bb15 ], [ 0, %bb1.preheader ]
     %tmp3 = phi i32 [ %tmp17, %bb15 ], [ 100000000, %bb1.preheader ]
-    %lsr.iv35 = bitcast [512 x i32]* %lsr.iv3 to i32*
-    %tmp5 = load i32, i32* %lsr.iv35, align 4, !tbaa !1
-    %tmp6 = load i32, i32* undef, align 4, !tbaa !1
+    %lsr.iv35 = bitcast ptr %lsr.iv3 to ptr
+    %tmp5 = load i32, ptr %lsr.iv35, align 4, !tbaa !1
+    %tmp6 = load i32, ptr undef, align 4, !tbaa !1
     %tmp7 = icmp eq i32 %tmp6, 0
     br i1 %tmp7, label %bb15, label %bb8
   
   bb8:                                              ; preds = %bb1
-    %0 = bitcast [300 x i32]* %lsr.iv1 to i32*
-    %tmp10 = load i32, i32* %0, align 4, !tbaa !1
+    %0 = bitcast ptr %lsr.iv1 to ptr
+    %tmp10 = load i32, ptr %0, align 4, !tbaa !1
     %tmp11 = icmp eq i32 %tmp10, 0
     %tmp12 = select i1 %tmp11, i32 %tmp5, i32 %tmp3
     %tmp14 = select i1 %tmp11, i32 %lsr.iv, i32 %tmp2
@@ -49,10 +49,10 @@
     %tmp17 = phi i32 [ %tmp3, %bb1 ], [ %tmp12, %bb8 ]
     %tmp18 = phi i32 [ %tmp2, %bb1 ], [ %tmp14, %bb8 ]
     %lsr.iv.next = add i32 %lsr.iv, 4
-    %scevgep = getelementptr [300 x i32], [300 x i32]* %lsr.iv1, i64 0, i64 4
-    %1 = bitcast i32* %scevgep to [300 x i32]*
-    %scevgep4 = getelementptr [512 x i32], [512 x i32]* %lsr.iv3, i64 0, i64 4
-    %2 = bitcast i32* %scevgep4 to [512 x i32]*
+    %scevgep = getelementptr [300 x i32], ptr %lsr.iv1, i64 0, i64 4
+    %1 = bitcast ptr %scevgep to ptr
+    %scevgep4 = getelementptr [512 x i32], ptr %lsr.iv3, i64 0, i64 4
+    %2 = bitcast ptr %scevgep4 to ptr
     br label %bb1
   
   bb20:                                             ; preds = %bb20, %bb20.preheader
@@ -141,7 +141,7 @@ body:             |
     successors: %bb.7(0x30000000), %bb.4(0x50000000)
   
     %5:grx32bit = LMux %21, 0, $noreg :: (load (s32) from %ir.lsr.iv35, !tbaa !1)
-    %6:grx32bit = LMux undef %19:addr64bit, 0, $noreg :: (load (s32) from `i32* undef`, !tbaa !1)
+    %6:grx32bit = LMux undef %19:addr64bit, 0, $noreg :: (load (s32) from `ptr undef`, !tbaa !1)
     CHIMux %6, 0, implicit-def $cc
     BRC 14, 6, %bb.4, implicit killed $cc
   

diff  --git a/llvm/test/CodeGen/SystemZ/loop-04.ll b/llvm/test/CodeGen/SystemZ/loop-04.ll
index 9e772851d076c..86666ea6c8dee 100644
--- a/llvm/test/CodeGen/SystemZ/loop-04.ll
+++ b/llvm/test/CodeGen/SystemZ/loop-04.ll
@@ -16,9 +16,9 @@ define void @main() local_unnamed_addr #0 {
 
 ; <label>:1:                                      ; preds = %1, %0
   %2 = phi i64 [ 0, %0 ], [ %7, %1 ]
-  %3 = getelementptr inbounds [10 x %0], [10 x %0]* bitcast (<{ <{ i64, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }>, <{ i64, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }>, <{ i64, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }>, <{ i64, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }>, <{ i64, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }>, <{ i64, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }>, <{ i64, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }>, <{ i64, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }>, <{ i64, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }>, <{ i64, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }> }>* @g_101 to [10 x %0]*), i64 0, i64 %2, i32 1
-  %4 = bitcast [11 x i8]* %3 to i88*
-  %5 = load i88, i88* %4, align 1
+  %3 = getelementptr inbounds [10 x %0], ptr @g_101, i64 0, i64 %2, i32 1
+  %4 = bitcast ptr %3 to ptr
+  %5 = load i88, ptr %4, align 1
   %6 = icmp ult i88 %5, 2361183241434822606848
   %7 = add nuw nsw i64 %2, 1
   br label %1

diff  --git a/llvm/test/CodeGen/SystemZ/multiselect-02.mir b/llvm/test/CodeGen/SystemZ/multiselect-02.mir
index ba23ee5361aad..0e7e3a9c75e8e 100644
--- a/llvm/test/CodeGen/SystemZ/multiselect-02.mir
+++ b/llvm/test/CodeGen/SystemZ/multiselect-02.mir
@@ -17,7 +17,7 @@ tracksRegLiveness: true
 body:             |
   bb.0.entry:
     %1:addr64bit = IMPLICIT_DEF
-    %0:gr32bit = LLC %1, 0, $noreg :: (load (s8) from `i8* undef`)
+    %0:gr32bit = LLC %1, 0, $noreg :: (load (s8) from `ptr undef`)
     CHI killed %0, 0, implicit-def $cc
     %2:gr32bit = LHI 2
     %3:gr32bit = LHI 8
@@ -27,7 +27,7 @@ body:             |
     %6:gr64bit = INSERT_SUBREG %7, killed %5, %subreg.subreg_l32
     %8:gr128bit = ZEXT128 killed %6
     %10:addr64bit = IMPLICIT_DEF
-    %9:gr128bit = DL %8, %10, 0, $noreg :: (load (s32) from `i64* undef` + 4)
+    %9:gr128bit = DL %8, %10, 0, $noreg :: (load (s32) from `ptr undef` + 4)
     %11:gr32bit = COPY %9.subreg_l32
     %12:gr64bit = LGHI 2
     %13:gr64bit = LGHI 8

diff  --git a/llvm/test/CodeGen/SystemZ/postra-sched-expandedops.mir b/llvm/test/CodeGen/SystemZ/postra-sched-expandedops.mir
index b093715457d8e..9d28f15b9a76e 100644
--- a/llvm/test/CodeGen/SystemZ/postra-sched-expandedops.mir
+++ b/llvm/test/CodeGen/SystemZ/postra-sched-expandedops.mir
@@ -28,25 +28,25 @@
   %0 = type { i8, i8, i8, i8, i16, i32, i32, i32 }
   
   @TTSize = external dso_local local_unnamed_addr global i32, align 4
-  @AS_TTable = external dso_local local_unnamed_addr global %0*, align 8
+  @AS_TTable = external dso_local local_unnamed_addr global ptr, align 8
   @Variant = external dso_local local_unnamed_addr global i32, align 4
   
   define dso_local void @LearnStoreTT(i32 signext %arg, i32 zeroext %arg1, i32 signext %arg2) #0 {
   bb:
-    %tmp = load i32, i32* @TTSize, align 4
+    %tmp = load i32, ptr @TTSize, align 4
     %tmp3 = urem i32 %arg1, %tmp
-    %tmp4 = load %0*, %0** @AS_TTable, align 8
+    %tmp4 = load ptr, ptr @AS_TTable, align 8
     %tmp5 = zext i32 %tmp3 to i64
-    %tmp6 = load i32, i32* @Variant, align 4
+    %tmp6 = load i32, ptr @Variant, align 4
     %tmp7 = add i32 %tmp6, -3
     %tmp8 = icmp ugt i32 %tmp7, 1
     %tmp9 = select i1 %tmp8, i8 3, i8 1
-    store i8 %tmp9, i8* undef, align 1
-    store i32 %arg, i32* undef, align 4
+    store i8 %tmp9, ptr undef, align 1
+    store i32 %arg, ptr undef, align 4
     %tmp10 = trunc i32 %arg2 to i8
-    store i8 %tmp10, i8* null, align 1
-    %tmp11 = getelementptr inbounds %0, %0* %tmp4, i64 %tmp5, i32 2
-    store i8 0, i8* %tmp11, align 2
+    store i8 %tmp10, ptr null, align 1
+    %tmp11 = getelementptr inbounds %0, ptr %tmp4, i64 %tmp5, i32 2
+    store i8 0, ptr %tmp11, align 2
     ret void
   }
   
@@ -79,9 +79,9 @@ body:             |
     CLFI killed renamable $r0l, 1, implicit-def $cc
     renamable $r0l = LHI 1
     renamable $r0l = LOCHI killed renamable $r0l, 3, 14, 2, implicit killed $cc
-    STC killed renamable $r0l, undef renamable $r1d, 0, $noreg :: (store (s8) into `i8* undef`)
-    ST renamable $r2l, undef renamable $r1d, 0, $noreg, implicit killed $r2d :: (store (s32) into `i32* undef`)
-    STC renamable $r4l, $noreg, 0, $noreg, implicit killed $r4d :: (store (s8) into `i8* null`)
+    STC killed renamable $r0l, undef renamable $r1d, 0, $noreg :: (store (s8) into `ptr undef`)
+    ST renamable $r2l, undef renamable $r1d, 0, $noreg, implicit killed $r2d :: (store (s32) into `ptr undef`)
+    STC renamable $r4l, $noreg, 0, $noreg, implicit killed $r4d :: (store (s8) into `ptr null`)
     renamable $r1d = MGHI killed renamable $r1d, 20
     renamable $r0l = LHI 0
     STC killed renamable $r0l, killed renamable $r3d, 2, killed renamable $r1d :: (store (s8) into %ir.tmp11, align 2)

diff  --git a/llvm/test/CodeGen/SystemZ/regalloc-GR128-02.mir b/llvm/test/CodeGen/SystemZ/regalloc-GR128-02.mir
index 9a338a058ed1c..8169635f813cb 100644
--- a/llvm/test/CodeGen/SystemZ/regalloc-GR128-02.mir
+++ b/llvm/test/CodeGen/SystemZ/regalloc-GR128-02.mir
@@ -10,14 +10,14 @@
   @g_193 = external dso_local unnamed_addr global i32, align 4
   
   define dso_local void @main() local_unnamed_addr {
-    %1 = load i32, i32* @g_193
+    %1 = load i32, ptr @g_193
     %2 = or i32 %1, -1395153718
     %3 = sdiv i32 -1395153718, %2
     br i1 undef, label %5, label %4
   
   ; <label>:4:                                      ; preds = %0
-    store i32 %3, i32* @g_74
-    store i32 -9, i32* @g_74
+    store i32 %3, ptr @g_74
+    store i32 -9, ptr @g_74
     ret void
   
   ; <label>:5:                                      ; preds = %0

diff  --git a/llvm/test/CodeGen/SystemZ/selectcc-04.ll b/llvm/test/CodeGen/SystemZ/selectcc-04.ll
index 8c5f015c00465..b5ce63793ae39 100644
--- a/llvm/test/CodeGen/SystemZ/selectcc-04.ll
+++ b/llvm/test/CodeGen/SystemZ/selectcc-04.ll
@@ -16,7 +16,7 @@ define dso_local void @fun() {
 entry:
   %tmp = add nuw nsw i16 0, 238
   %tmp4 = sub nsw i16 %tmp, 0
-  store i64 4, ptr getelementptr inbounds (<{ i64, i8, i8, i8, i8, i8, i8 }>, ptr @g_277, i64 0, i32 0), align 8
+  store i64 4, ptr @g_277, align 8
   %tmp5 = load i64, ptr getelementptr inbounds ([7 x i64], ptr @g_1531, i64 0, i64 5), align 8
   %tmp6 = trunc i64 %tmp5 to i32
   %tmp7 = trunc i64 %tmp5 to i16

diff  --git a/llvm/test/CodeGen/SystemZ/subregliveness-06.mir b/llvm/test/CodeGen/SystemZ/subregliveness-06.mir
index 1ca1114be6684..d0a8d4f10f4ee 100644
--- a/llvm/test/CodeGen/SystemZ/subregliveness-06.mir
+++ b/llvm/test/CodeGen/SystemZ/subregliveness-06.mir
@@ -20,19 +20,19 @@
   @g_747 = external dso_local unnamed_addr global i1, align 2
   @0 = internal unnamed_addr global i8 74, align 2
   @g_1055 = external dso_local unnamed_addr global i16, align 2
-  @g_195 = external dso_local global i64**, align 8
+  @g_195 = external dso_local global ptr, align 8
 
-  declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture) #0
+  declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #0
 
-  declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture) #0
+  declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #0
 
   define dso_local fastcc void @func_32(i8 zeroext %arg, i16 zeroext %arg1) unnamed_addr #1 {
   bb:
     %tmp = alloca i32, align 4
-    %tmp2 = alloca [5 x [5 x i32***]], align 8
-    %tmp3 = bitcast [5 x [5 x i32***]]* %tmp2 to i8*
-    %tmp4 = getelementptr inbounds [5 x [5 x i32***]], [5 x [5 x i32***]]* %tmp2, i64 0, i64 2, i64 2
-    %tmp5 = bitcast i32**** %tmp4 to i64***
+    %tmp2 = alloca [5 x [5 x ptr]], align 8
+    %tmp3 = bitcast ptr %tmp2 to ptr
+    %tmp4 = getelementptr inbounds [5 x [5 x ptr]], ptr %tmp2, i64 0, i64 2, i64 2
+    %tmp5 = bitcast ptr %tmp4 to ptr
     br label %bb6
 
   bb6:                                              ; preds = %bb40, %bb
@@ -41,66 +41,66 @@
     %tmp9 = phi i8 [ %arg, %bb ], [ 0, %bb40 ]
     %tmp10 = sext i8 %tmp7 to i64
     %tmp11 = add nsw i64 %tmp10, 1
-    %tmp12 = getelementptr inbounds [10 x i8], [10 x i8]* @g_314, i64 0, i64 %tmp11
-    %tmp13 = load volatile i8, i8* %tmp12, align 1
+    %tmp12 = getelementptr inbounds [10 x i8], ptr @g_314, i64 0, i64 %tmp11
+    %tmp13 = load volatile i8, ptr %tmp12, align 1
     br i1 undef, label %bb39, label %bb14
 
   bb14:                                             ; preds = %bb6
-    %tmp15 = load i64**, i64*** @g_195, align 8
-    %tmp16 = load volatile i8, i8* %tmp12, align 1
-    store i32 7, i32* %tmp, align 4
-    call void @llvm.lifetime.start.p0i8(i64 200, i8* nonnull %tmp3)
-    store i32 580868341, i32* @g_69, align 4
+    %tmp15 = load ptr, ptr @g_195, align 8
+    %tmp16 = load volatile i8, ptr %tmp12, align 1
+    store i32 7, ptr %tmp, align 4
+    call void @llvm.lifetime.start.p0(i64 200, ptr nonnull %tmp3)
+    store i32 580868341, ptr @g_69, align 4
     %tmp17 = zext i8 %tmp9 to i64
-    %tmp18 = load i64, i64* @g_352, align 8
+    %tmp18 = load i64, ptr @g_352, align 8
     %tmp19 = and i64 %tmp18, %tmp17
     %tmp20 = icmp ne i64 %tmp19, 1
     %tmp21 = zext i1 %tmp20 to i64
-    %tmp22 = load i64*, i64** %tmp15, align 8
-    store i64 %tmp21, i64* %tmp22, align 8
-    %tmp23 = load i32, i32* @g_334, align 4
+    %tmp22 = load ptr, ptr %tmp15, align 8
+    store i64 %tmp21, ptr %tmp22, align 8
+    %tmp23 = load i32, ptr @g_334, align 4
     %tmp24 = xor i32 %tmp23, 1
-    store i32 %tmp24, i32* @g_334, align 4
+    store i32 %tmp24, ptr @g_334, align 4
     %tmp25 = zext i8 %tmp9 to i16
     %tmp26 = mul i16 %tmp25, 26036
-    %tmp27 = load i64**, i64*** %tmp5, align 8
+    %tmp27 = load ptr, ptr %tmp5, align 8
     br label %bb28
 
   bb28:                                             ; preds = %bb14
     %tmp29 = mul i16 %tmp26, %tmp8
     %tmp30 = zext i16 %tmp29 to i32
-    store i32 %tmp30, i32* @g_69, align 4
-    store i8 0, i8* @g_226, align 2
+    store i32 %tmp30, ptr @g_69, align 4
+    store i8 0, ptr @g_226, align 2
     br label %bb32
 
   bb31:                                             ; preds = %bb35
-    call void @llvm.lifetime.end.p0i8(i64 200, i8* nonnull %tmp3)
+    call void @llvm.lifetime.end.p0(i64 200, ptr nonnull %tmp3)
     br label %bb40
 
   bb32:                                             ; preds = %bb34, %bb28
-    store i16 1, i16* @g_1055, align 2
-    store i64 0, i64* @g_352, align 8
-    store i32* @g_334, i32** undef, align 8
+    store i16 1, ptr @g_1055, align 2
+    store i64 0, ptr @g_352, align 8
+    store ptr @g_334, ptr undef, align 8
     %tmp33 = or i64 0, 1
-    store i64 %tmp33, i64* @g_352, align 8
-    store i32* @g_334, i32** null, align 8
+    store i64 %tmp33, ptr @g_352, align 8
+    store ptr @g_334, ptr null, align 8
     br label %bb34
 
   bb34:                                             ; preds = %bb32
     br i1 false, label %bb32, label %bb35
 
   bb35:                                             ; preds = %bb34
-    store i32* %tmp, i32** undef, align 8
-    store i8 0, i8* @0, align 2
-    store i16 2, i16* @g_189, align 2
-    store i8 1, i8* @g_54, align 2
-    store i1 true, i1* @g_747, align 2
-    store i64 0, i64* undef, align 8
-    %tmp36 = load i64*, i64** undef, align 8
-    %tmp37 = load i64, i64* %tmp36, align 8
-    %tmp38 = load i64*, i64** %tmp27, align 8
-    store i64 %tmp37, i64* %tmp38, align 8
-    store i16 0, i16* @g_189, align 2
+    store ptr %tmp, ptr undef, align 8
+    store i8 0, ptr @0, align 2
+    store i16 2, ptr @g_189, align 2
+    store i8 1, ptr @g_54, align 2
+    store i1 true, ptr @g_747, align 2
+    store i64 0, ptr undef, align 8
+    %tmp36 = load ptr, ptr undef, align 8
+    %tmp37 = load i64, ptr %tmp36, align 8
+    %tmp38 = load ptr, ptr %tmp27, align 8
+    store i64 %tmp37, ptr %tmp38, align 8
+    store i16 0, ptr @g_189, align 2
     br label %bb31
 
   bb39:                                             ; preds = %bb6
@@ -108,7 +108,7 @@
 
   bb40:                                             ; preds = %bb39, %bb31
     %tmp41 = phi i16 [ undef, %bb39 ], [ 0, %bb31 ]
-    %tmp42 = load volatile i8, i8* %tmp12, align 1
+    %tmp42 = load volatile i8, ptr %tmp12, align 1
     %tmp43 = add i8 %tmp7, 1
     br i1 false, label %bb6, label %bb44
 
@@ -205,9 +205,9 @@ body:             |
   bb.4.bb32:
     STHRL %11, @g_1055 :: (store (s16) into @g_1055)
     STGRL %9, @g_352 :: (store (s64) into @g_352)
-    STG %10, undef %42:addr64bit, 0, $noreg :: (store (s64) into `i32** undef`)
+    STG %10, undef %42:addr64bit, 0, $noreg :: (store (s64) into `ptr undef`)
     STGRL %13, @g_352 :: (store (s64) into @g_352)
-    STG %10, $noreg, 0, $noreg :: (store (s64) into `i32** null`)
+    STG %10, $noreg, 0, $noreg :: (store (s64) into `ptr null`)
 
   bb.5.bb34:
     successors: %bb.4(0x7c000000), %bb.6(0x04000000)
@@ -217,12 +217,12 @@ body:             |
     J %bb.6
 
   bb.6.bb35:
-    STG %14, undef %43:addr64bit, 0, $noreg :: (store (s64) into `i32** undef`)
+    STG %14, undef %43:addr64bit, 0, $noreg :: (store (s64) into `ptr undef`)
     MVI %15, 0, 0 :: (store (s8) into @0, align 2)
     STHRL %16, @g_189 :: (store (s16) into @g_189)
     MVI %17, 0, 1 :: (store (s8) into @g_54, align 2)
     MVI %18, 0, 1 :: (store (s8) into @g_747, align 2)
-    MVGHI undef %44:addr64bit, 0, 0 :: (store (s64) into `i64* undef`)
+    MVGHI undef %44:addr64bit, 0, 0 :: (store (s64) into `ptr undef`)
     %45:gr64bit = LG $noreg, 0, $noreg :: (load (s64) from %ir.tmp36)
     %46:addr64bit = LG killed %39, 0, $noreg :: (load (s64) from %ir.tmp27)
     STG killed %45, killed %46, 0, $noreg :: (store (s64) into %ir.tmp38)

diff  --git a/llvm/test/CodeGen/SystemZ/zos-landingpad.ll b/llvm/test/CodeGen/SystemZ/zos-landingpad.ll
index 481efb5935b36..7f3214d574242 100644
--- a/llvm/test/CodeGen/SystemZ/zos-landingpad.ll
+++ b/llvm/test/CodeGen/SystemZ/zos-landingpad.ll
@@ -4,12 +4,12 @@
 ; and Exception Selector registers, and that the exception table is emitted.
 
 declare void @callee()
-declare void @passeh(i8*, i32) noreturn
+declare void @passeh(ptr, i32) noreturn
 declare i32 @__zos_cxx_personality_v2(...)
 
-define void @test1() uwtable personality i32 (...)* @__zos_cxx_personality_v2 {
+define void @test1() uwtable personality ptr @__zos_cxx_personality_v2 {
 entry:
-  %ehptr = alloca i8*, align 8
+  %ehptr = alloca ptr, align 8
   %ehsel = alloca i32, align 8
   invoke void @callee() to label %done unwind label %lpad
 done:
@@ -17,16 +17,16 @@ done:
 ; Match the return instruction.
 ; CHECK: b 2(7)
 lpad:
-  %0 = landingpad { i8*, i32 } cleanup
+  %0 = landingpad { ptr, i32 } cleanup
 ; The Exception Pointer is %r1; the Exception Selector, %r2.
 ; CHECK: @BB{{[^%]*}} %lpad
 ; CHECK-DAG: stg 1, {{.*}}
 ; CHECK-DAG: st 2, {{.*}}
-  %1 = extractvalue { i8*, i32 } %0, 0
-  %2 = extractvalue { i8*, i32 } %0, 1
-  store i8* %1, i8** %ehptr, align 8
-  store i32 %2, i32* %ehsel, align 8
-  call void @passeh(i8* %1, i32 %2)
+  %1 = extractvalue { ptr, i32 } %0, 0
+  %2 = extractvalue { ptr, i32 } %0, 1
+  store ptr %1, ptr %ehptr, align 8
+  store i32 %2, ptr %ehsel, align 8
+  call void @passeh(ptr %1, i32 %2)
   unreachable
 }
 

diff  --git a/llvm/test/CodeGen/VE/Scalar/pic_access_data.ll b/llvm/test/CodeGen/VE/Scalar/pic_access_data.ll
index 184b1aa8a5532..12000063a7bea 100644
--- a/llvm/test/CodeGen/VE/Scalar/pic_access_data.ll
+++ b/llvm/test/CodeGen/VE/Scalar/pic_access_data.ll
@@ -1,7 +1,7 @@
 ; RUN: llc -relocation-model=pic < %s -mtriple=ve-unknown-unknown | FileCheck %s
 
 @dst = external global i32, align 4
-@ptr = external global i32*, align 8
+@ptr = external global ptr, align 8
 @src = external global i32, align 4
 
 define i32 @func() {
@@ -33,8 +33,8 @@ define i32 @func() {
 ; CHECK-NEXT:    ld %s15, 24(, %s11)
 ; CHECK-NEXT:    b.l.t (, %s10)
 
-  store i32* @dst, i32** @ptr, align 8
-  %1 = load i32, i32* @src, align 4
-  store i32 %1, i32* @dst, align 4
+  store ptr @dst, ptr @ptr, align 8
+  %1 = load i32, ptr @src, align 4
+  store i32 %1, ptr @dst, align 4
   ret i32 1
 }

diff  --git a/llvm/test/CodeGen/VE/Scalar/pic_indirect_func_call.ll b/llvm/test/CodeGen/VE/Scalar/pic_indirect_func_call.ll
index 1e758c92901a7..92c7c9821d9e0 100644
--- a/llvm/test/CodeGen/VE/Scalar/pic_indirect_func_call.ll
+++ b/llvm/test/CodeGen/VE/Scalar/pic_indirect_func_call.ll
@@ -1,6 +1,6 @@
 ; RUN: llc -relocation-model=pic < %s -mtriple=ve-unknown-unknown | FileCheck %s
 
-@ptr = external global void (...)*, align 8
+@ptr = external global ptr, align 8
 
 define void @func() {
 ; CHECK-LABEL: func:
@@ -22,9 +22,9 @@ define void @func() {
 ; CHECK-NEXT:    bsic %s10, (, %s12)
 ; CHECK-NEXT:    or %s11, 0, %s9
 
-  store void (...)* @function, void (...)** @ptr, align 8
-  %1 = load void (...)*, void (...)** @ptr, align 8
-  %2 = bitcast void (...)* %1 to void ()*
+  store ptr @function, ptr @ptr, align 8
+  %1 = load ptr, ptr @ptr, align 8
+  %2 = bitcast ptr %1 to ptr
   call void %2()
   ret void
 }

diff  --git a/llvm/test/CodeGen/WebAssembly/cfg-stackify.ll b/llvm/test/CodeGen/WebAssembly/cfg-stackify.ll
index 6e0bbce78a1ae..c443c4ddb4729 100644
--- a/llvm/test/CodeGen/WebAssembly/cfg-stackify.ll
+++ b/llvm/test/CodeGen/WebAssembly/cfg-stackify.ll
@@ -92,7 +92,7 @@ back:
 ; CHECK: end_loop
 ; CHECK: end_block
 ; CHECK: return{{$}}
-define void @test2(double* nocapture %p, i32 %n) {
+define void @test2(ptr nocapture %p, i32 %n) {
 entry:
   %cmp.4 = icmp sgt i32 %n, 0
   br i1 %cmp.4, label %for.body.preheader, label %for.end
@@ -102,10 +102,10 @@ for.body.preheader:
 
 for.body:
   %i.05 = phi i32 [ %inc, %for.body ], [ 0, %for.body.preheader ]
-  %arrayidx = getelementptr inbounds double, double* %p, i32 %i.05
-  %0 = load double, double* %arrayidx, align 8
+  %arrayidx = getelementptr inbounds double, ptr %p, i32 %i.05
+  %0 = load double, ptr %arrayidx, align 8
   %mul = fmul double %0, 3.200000e+00
-  store double %mul, double* %arrayidx, align 8
+  store double %mul, ptr %arrayidx, align 8
   %inc = add nuw nsw i32 %i.05, 1
   %exitcond = icmp eq i32 %inc, %n
   br i1 %exitcond, label %for.end.loopexit, label %for.body
@@ -133,26 +133,26 @@ for.end:
 ; CHECK-NEXT: end_block{{$}}
 ; CHECK: i32.const $push{{[0-9]+}}=, 0{{$}}
 ; CHECK-NEXT: return $pop{{[0-9]+}}{{$}}
-define i32 @doublediamond(i32 %a, i32 %b, i32* %p) {
+define i32 @doublediamond(i32 %a, i32 %b, ptr %p) {
 entry:
   %c = icmp eq i32 %a, 0
   %d = icmp eq i32 %b, 0
-  store volatile i32 0, i32* %p
+  store volatile i32 0, ptr %p
   br i1 %c, label %true, label %false
 true:
-  store volatile i32 1, i32* %p
+  store volatile i32 1, ptr %p
   br label %exit
 false:
-  store volatile i32 2, i32* %p
+  store volatile i32 2, ptr %p
   br i1 %d, label %ft, label %ff
 ft:
-  store volatile i32 3, i32* %p
+  store volatile i32 3, ptr %p
   br label %exit
 ff:
-  store volatile i32 4, i32* %p
+  store volatile i32 4, ptr %p
   br label %exit
 exit:
-  store volatile i32 5, i32* %p
+  store volatile i32 5, ptr %p
   ret i32 0
 }
 
@@ -161,16 +161,16 @@ exit:
 ; CHECK: br_if 0, $1{{$}}
 ; CHECK: .LBB{{[0-9]+}}_2:
 ; CHECK: return
-define i32 @triangle(i32* %p, i32 %a) {
+define i32 @triangle(ptr %p, i32 %a) {
 entry:
   %c = icmp eq i32 %a, 0
-  store volatile i32 0, i32* %p
+  store volatile i32 0, ptr %p
   br i1 %c, label %true, label %exit
 true:
-  store volatile i32 1, i32* %p
+  store volatile i32 1, ptr %p
   br label %exit
 exit:
-  store volatile i32 2, i32* %p
+  store volatile i32 2, ptr %p
   ret i32 0
 }
 
@@ -183,28 +183,28 @@ exit:
 ; CHECK: .LBB{{[0-9]+}}_3:
 ; CHECK: i32.const $push{{[0-9]+}}=, 0{{$}}
 ; CHECK-NEXT: return $pop{{[0-9]+}}{{$}}
-define i32 @diamond(i32* %p, i32 %a) {
+define i32 @diamond(ptr %p, i32 %a) {
 entry:
   %c = icmp eq i32 %a, 0
-  store volatile i32 0, i32* %p
+  store volatile i32 0, ptr %p
   br i1 %c, label %true, label %false
 true:
-  store volatile i32 1, i32* %p
+  store volatile i32 1, ptr %p
   br label %exit
 false:
-  store volatile i32 2, i32* %p
+  store volatile i32 2, ptr %p
   br label %exit
 exit:
-  store volatile i32 3, i32* %p
+  store volatile i32 3, ptr %p
   ret i32 0
 }
 
 ; CHECK-LABEL: single_block:
 ; CHECK-NOT: br
 ; CHECK: return $pop{{[0-9]+}}{{$}}
-define i32 @single_block(i32* %p) {
+define i32 @single_block(ptr %p) {
 entry:
-  store volatile i32 0, i32* %p
+  store volatile i32 0, ptr %p
   ret i32 0
 }
 
@@ -215,12 +215,12 @@ entry:
 ; CHECK: i32.store 0($0), $pop{{[0-9]+}}{{$}}
 ; CHECK: br 0{{$}}
 ; CHECK: .LBB{{[0-9]+}}_2:
-define i32 @minimal_loop(i32* %p) {
+define i32 @minimal_loop(ptr %p) {
 entry:
-  store volatile i32 0, i32* %p
+  store volatile i32 0, ptr %p
   br label %loop
 loop:
-  store volatile i32 1, i32* %p
+  store volatile i32 1, ptr %p
   br label %loop
 }
 
@@ -232,16 +232,16 @@ loop:
 ; CHECK-NEXT: end_loop{{$}}
 ; CHECK: i32.const $push{{[0-9]+}}=, 0{{$}}
 ; CHECK-NEXT: return $pop{{[0-9]+}}{{$}}
-define i32 @simple_loop(i32* %p, i32 %a) {
+define i32 @simple_loop(ptr %p, i32 %a) {
 entry:
   %c = icmp eq i32 %a, 0
-  store volatile i32 0, i32* %p
+  store volatile i32 0, ptr %p
   br label %loop
 loop:
-  store volatile i32 1, i32* %p
+  store volatile i32 1, ptr %p
   br i1 %c, label %loop, label %exit
 exit:
-  store volatile i32 2, i32* %p
+  store volatile i32 2, ptr %p
   ret i32 0
 }
 
@@ -253,23 +253,23 @@ exit:
 ; CHECK: .LBB{{[0-9]+}}_3:
 ; CHECK: .LBB{{[0-9]+}}_4:
 ; CHECK: return
-define i32 @doubletriangle(i32 %a, i32 %b, i32* %p) {
+define i32 @doubletriangle(i32 %a, i32 %b, ptr %p) {
 entry:
   %c = icmp eq i32 %a, 0
   %d = icmp eq i32 %b, 0
-  store volatile i32 0, i32* %p
+  store volatile i32 0, ptr %p
   br i1 %c, label %true, label %exit
 true:
-  store volatile i32 2, i32* %p
+  store volatile i32 2, ptr %p
   br i1 %d, label %tt, label %tf
 tt:
-  store volatile i32 3, i32* %p
+  store volatile i32 3, ptr %p
   br label %tf
 tf:
-  store volatile i32 4, i32* %p
+  store volatile i32 4, ptr %p
   br label %exit
 exit:
-  store volatile i32 5, i32* %p
+  store volatile i32 5, ptr %p
   ret i32 0
 }
 
@@ -283,23 +283,23 @@ exit:
 ; CHECK: .LBB{{[0-9]+}}_4:
 ; CHECK: i32.const $push{{[0-9]+}}=, 0{{$}}
 ; CHECK-NEXT: return $pop{{[0-9]+}}{{$}}
-define i32 @ifelse_earlyexits(i32 %a, i32 %b, i32* %p) {
+define i32 @ifelse_earlyexits(i32 %a, i32 %b, ptr %p) {
 entry:
   %c = icmp eq i32 %a, 0
   %d = icmp eq i32 %b, 0
-  store volatile i32 0, i32* %p
+  store volatile i32 0, ptr %p
   br i1 %c, label %true, label %false
 true:
-  store volatile i32 1, i32* %p
+  store volatile i32 1, ptr %p
   br label %exit
 false:
-  store volatile i32 2, i32* %p
+  store volatile i32 2, ptr %p
   br i1 %d, label %ft, label %exit
 ft:
-  store volatile i32 3, i32* %p
+  store volatile i32 3, ptr %p
   br label %exit
 exit:
-  store volatile i32 4, i32* %p
+  store volatile i32 4, ptr %p
   ret i32 0
 }
 
@@ -318,28 +318,28 @@ exit:
 ; CHECK: br              0{{$}}
 ; CHECK: .LBB{{[0-9]+}}_6:
 ; CHECK-NEXT: end_loop{{$}}
-define i32 @doublediamond_in_a_loop(i32 %a, i32 %b, i32* %p) {
+define i32 @doublediamond_in_a_loop(i32 %a, i32 %b, ptr %p) {
 entry:
   br label %header
 header:
   %c = icmp eq i32 %a, 0
   %d = icmp eq i32 %b, 0
-  store volatile i32 0, i32* %p
+  store volatile i32 0, ptr %p
   br i1 %c, label %true, label %false
 true:
-  store volatile i32 1, i32* %p
+  store volatile i32 1, ptr %p
   br label %exit
 false:
-  store volatile i32 2, i32* %p
+  store volatile i32 2, ptr %p
   br i1 %d, label %ft, label %ff
 ft:
-  store volatile i32 3, i32* %p
+  store volatile i32 3, ptr %p
   br label %exit
 ff:
-  store volatile i32 4, i32* %p
+  store volatile i32 4, ptr %p
   br label %exit
 exit:
-  store volatile i32 5, i32* %p
+  store volatile i32 5, ptr %p
   br label %header
 }
 
@@ -431,19 +431,19 @@ entry:
   br label %header
 
 header:
-  store volatile i32 0, i32* null
+  store volatile i32 0, ptr null
   br i1 %p, label %more, label %alt
 
 more:
-  store volatile i32 1, i32* null
+  store volatile i32 1, ptr null
   br i1 %q, label %header, label %return
 
 alt:
-  store volatile i32 2, i32* null
+  store volatile i32 2, ptr null
   ret void
 
 return:
-  store volatile i32 3, i32* null
+  store volatile i32 3, ptr null
   ret void
 }
 
@@ -477,27 +477,27 @@ entry:
   br label %header
 
 header:
-  store volatile i32 0, i32* null
+  store volatile i32 0, ptr null
   br i1 %p, label %more, label %second
 
 more:
-  store volatile i32 1, i32* null
+  store volatile i32 1, ptr null
   br i1 %q, label %evenmore, label %first
 
 evenmore:
-  store volatile i32 1, i32* null
+  store volatile i32 1, ptr null
   br i1 %q, label %header, label %return
 
 return:
-  store volatile i32 2, i32* null
+  store volatile i32 2, ptr null
   ret void
 
 first:
-  store volatile i32 3, i32* null
+  store volatile i32 3, ptr null
   br label %second
 
 second:
-  store volatile i32 4, i32* null
+  store volatile i32 4, ptr null
   ret void
 }
 
@@ -523,27 +523,27 @@ second:
 ; CHECK:       unreachable
 define void @test7(i1 %tobool2, i1 %tobool9) {
 entry:
-  store volatile i32 0, i32* null
+  store volatile i32 0, ptr null
   br label %loop
 
 loop:
-  store volatile i32 1, i32* null
+  store volatile i32 1, ptr null
   br i1 %tobool2, label %l1, label %l0
 
 l0:
-  store volatile i32 2, i32* null
+  store volatile i32 2, ptr null
   br i1 %tobool9, label %loop, label %u0
 
 l1:
-  store volatile i32 3, i32* null
+  store volatile i32 3, ptr null
   br i1 %tobool9, label %loop, label %u1
 
 u0:
-  store volatile i32 4, i32* null
+  store volatile i32 4, ptr null
   unreachable
 
 u1:
-  store volatile i32 5, i32* null
+  store volatile i32 5, ptr null
   unreachable
 }
 
@@ -605,31 +605,31 @@ bb3:
 declare i1 @a()
 define void @test9() {
 entry:
-  store volatile i32 0, i32* null
+  store volatile i32 0, ptr null
   br label %header
 
 header:
-  store volatile i32 1, i32* null
+  store volatile i32 1, ptr null
   %call4 = call i1 @a()
   br i1 %call4, label %header2, label %end
 
 header2:
-  store volatile i32 2, i32* null
+  store volatile i32 2, ptr null
   %call = call i1 @a()
   br i1 %call, label %if.then, label %if.else
 
 if.then:
-  store volatile i32 3, i32* null
+  store volatile i32 3, ptr null
   %call3 = call i1 @a()
   br i1 %call3, label %header2, label %header
 
 if.else:
-  store volatile i32 4, i32* null
+  store volatile i32 4, ptr null
   %call2 = call i1 @a()
   br i1 %call2, label %header2, label %header
 
 end:
-  store volatile i32 5, i32* null
+  store volatile i32 5, ptr null
   ret void
 }
 
@@ -732,31 +732,31 @@ bb6:
 ; CHECK:       return{{$}}
 define void @test11() {
 bb0:
-  store volatile i32 0, i32* null
+  store volatile i32 0, ptr null
   br i1 undef, label %bb1, label %bb4
 bb1:
-  store volatile i32 1, i32* null
+  store volatile i32 1, ptr null
   br i1 undef, label %bb3, label %bb2
 bb2:
-  store volatile i32 2, i32* null
+  store volatile i32 2, ptr null
   br i1 undef, label %bb3, label %bb7
 bb3:
-  store volatile i32 3, i32* null
+  store volatile i32 3, ptr null
   ret void
 bb4:
-  store volatile i32 4, i32* null
+  store volatile i32 4, ptr null
   br i1 undef, label %bb8, label %bb5
 bb5:
-  store volatile i32 5, i32* null
+  store volatile i32 5, ptr null
   br i1 undef, label %bb6, label %bb7
 bb6:
-  store volatile i32 6, i32* null
+  store volatile i32 6, ptr null
   ret void
 bb7:
-  store volatile i32 7, i32* null
+  store volatile i32 7, ptr null
   ret void
 bb8:
-  store volatile i32 8, i32* null
+  store volatile i32 8, ptr null
   ret void
 }
 
@@ -778,14 +778,14 @@ bb8:
 ; CHECK-NEXT:  end_loop{{$}}
 ; CHECK-NEXT:  end_block{{$}}
 ; CHECK-NEXT:  return{{$}}
-define void @test12(i8* %arg) {
+define void @test12(ptr %arg) {
 bb:
   br label %bb1
 
 bb1:
   %tmp = phi i32 [ 0, %bb ], [ %tmp5, %bb4 ]
-  %tmp2 = getelementptr i8, i8* %arg, i32 %tmp
-  %tmp3 = load i8, i8* %tmp2
+  %tmp2 = getelementptr i8, ptr %arg, i32 %tmp
+  %tmp3 = load i8, ptr %tmp2
   switch i8 %tmp3, label %bb7 [
     i8 42, label %bb4
     i8 76, label %bb4
@@ -933,23 +933,23 @@ bb:
   br i1 %tmp1, label %bb2, label %bb14
 
 bb2:
-  %tmp3 = phi %0** [ %tmp6, %bb5 ], [ null, %bb ]
+  %tmp3 = phi ptr [ %tmp6, %bb5 ], [ null, %bb ]
   %tmp4 = icmp eq i32 0, 11
   br i1 %tmp4, label %bb5, label %bb8
 
 bb5:
-  %tmp = bitcast i8* null to %0**
-  %tmp6 = getelementptr %0*, %0** %tmp3, i32 1
-  %tmp7 = icmp eq %0** %tmp6, null
+  %tmp = bitcast ptr null to ptr
+  %tmp6 = getelementptr ptr, ptr %tmp3, i32 1
+  %tmp7 = icmp eq ptr %tmp6, null
   br i1 %tmp7, label %bb10, label %bb2
 
 bb8:
-  %tmp9 = icmp eq %0** null, undef
+  %tmp9 = icmp eq ptr null, undef
   br label %bb10
 
 bb10:
-  %tmp11 = phi %0** [ null, %bb8 ], [ %tmp, %bb5 ]
-  %tmp12 = icmp eq %0** null, %tmp11
+  %tmp11 = phi ptr [ null, %bb8 ], [ %tmp, %bb5 ]
+  %tmp12 = icmp eq ptr null, %tmp11
   br i1 %tmp12, label %bb15, label %bb13
 
 bb13:

diff  --git a/llvm/test/CodeGen/WebAssembly/global.ll b/llvm/test/CodeGen/WebAssembly/global.ll
index dc9b909dc7ea4..943ab8c78e567 100644
--- a/llvm/test/CodeGen/WebAssembly/global.ll
+++ b/llvm/test/CodeGen/WebAssembly/global.ll
@@ -7,14 +7,14 @@ target triple = "wasm32-unknown-unknown"
 
 ; CHECK-NOT: llvm.used
 ; CHECK-NOT: llvm.metadata
-@llvm.used = appending global [1 x i32*] [i32* @g], section "llvm.metadata"
+@llvm.used = appending global [1 x ptr] [ptr @g], section "llvm.metadata"
 
 ; CHECK: foo:
 ; CHECK: i32.const $push0=, 0{{$}}
 ; CHECK-NEXT: i32.load $push1=, answer($pop0){{$}}
 ; CHECK-NEXT: return $pop1{{$}}
 define i32 @foo() {
-  %a = load i32, i32* @answer
+  %a = load i32, ptr @answer
   ret i32 %a
 }
 
@@ -22,10 +22,10 @@ define i32 @foo() {
 ; CHECK-NEXT: .functype call_memcpy (i32, i32, i32) -> (i32){{$}}
 ; CHECK-NEXT: call            $push0=, memcpy, $0, $1, $2{{$}}
 ; CHECK-NEXT: return          $pop0{{$}}
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1)
-define i8* @call_memcpy(i8* %p, i8* nocapture readonly %q, i32 %n) {
-  tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i1 false)
-  ret i8* %p
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture, ptr nocapture readonly, i32, i1)
+define ptr @call_memcpy(ptr %p, ptr nocapture readonly %q, i32 %n) {
+  tail call void @llvm.memcpy.p0.p0.i32(ptr %p, ptr %q, i32 %n, i1 false)
+  ret ptr %p
 }
 
 ; CHECK: .type   .Lg,@object
@@ -187,7 +187,7 @@ define i8* @call_memcpy(i8* %p, i8* nocapture readonly %q, i32 %n) {
 ; CHECK-NEXT: .int32 arr+80
 ; CHECK-NEXT: .size ptr, 4
 @arr = global [128 x i32] zeroinitializer, align 16
-@ptr = global i32* getelementptr inbounds ([128 x i32], [128 x i32]* @arr, i32 0, i32 20), align 4
+@ptr = global ptr getelementptr inbounds ([128 x i32], ptr @arr, i32 0, i32 20), align 4
 
 ; Constant global.
 ; CHECK: .type    rom,@object{{$}}
@@ -211,11 +211,11 @@ define i8* @call_memcpy(i8* %p, i8* nocapture readonly %q, i32 %n) {
 ; CHECK-NEXT: .int32      array+4
 ; CHECK-NEXT: .size       pointer_to_array, 4
 @array = internal constant [8 x i8] zeroinitializer, align 1
-@pointer_to_array = constant i8* getelementptr inbounds ([8 x i8], [8 x i8]* @array, i32 0, i32 4), align 4
+@pointer_to_array = constant ptr getelementptr inbounds ([8 x i8], ptr @array, i32 0, i32 4), align 4
 
 ; Handle external objects with opaque type.
 %struct.ASTRUCT = type opaque
 @g_struct = external global %struct.ASTRUCT, align 1
 define i32 @address_of_opaque()  {
-  ret i32 ptrtoint (%struct.ASTRUCT* @g_struct to i32)
+  ret i32 ptrtoint (ptr @g_struct to i32)
 }
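
The global.ll hunks above show the two mechanical pieces of the conversion side by side: every typed pointer (i8*, i32*, %struct.T*) collapses to the single ptr type, and overloaded intrinsics drop the pointee part of their mangling suffix (llvm.memcpy.p0i8.p0i8.i32 becomes llvm.memcpy.p0.p0.i32). As a minimal sketch of that pattern, not taken from the test and using a hypothetical @copy wrapper:

  ; Typed pointers (pre-conversion)
  declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1)
  define i8* @copy(i8* %dst, i8* %src, i32 %n) {
    call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %src, i32 %n, i1 false)
    ret i8* %dst
  }

  ; Opaque pointers (post-conversion): same semantics, one pointer type
  declare void @llvm.memcpy.p0.p0.i32(ptr nocapture, ptr nocapture readonly, i32, i1)
  define ptr @copy(ptr %dst, ptr %src, i32 %n) {
    call void @llvm.memcpy.p0.p0.i32(ptr %dst, ptr %src, i32 %n, i1 false)
    ret ptr %dst
  }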

diff  --git a/llvm/test/CodeGen/WebAssembly/userstack.ll b/llvm/test/CodeGen/WebAssembly/userstack.ll
index f7bdcdd42ba13..fe43fac207781 100644
--- a/llvm/test/CodeGen/WebAssembly/userstack.ll
+++ b/llvm/test/CodeGen/WebAssembly/userstack.ll
@@ -554,10 +554,10 @@ define void @llvm_stack_builtins(i32 %alloc) noredzone {
 ; CHECK-LABEL: llvm_stacksave_noalloca:
 define void @llvm_stacksave_noalloca() noredzone {
  ; CHECK: global.get $push[[L11:.+]]=, __stack_pointer{{$}}
- %stack = call i8* @llvm.stacksave()
+ %stack = call ptr @llvm.stacksave()
 
  ; CHECK-NEXT: call use_i8_star, $pop[[L11:.+]]
- call void @use_i8_star(i8* %stack)
+ call void @use_i8_star(ptr %stack)
 
  ret void
 }

diff  --git a/llvm/test/CodeGen/WinCFGuard/cfguard-cast.ll b/llvm/test/CodeGen/WinCFGuard/cfguard-cast.ll
index 406248d107a20..ca088445be0da 100644
--- a/llvm/test/CodeGen/WinCFGuard/cfguard-cast.ll
+++ b/llvm/test/CodeGen/WinCFGuard/cfguard-cast.ll
@@ -6,7 +6,7 @@
 declare void @unprototyped(...)
 
 define i32 @call_unprototyped() {
-  call void bitcast (void (...)* @unprototyped to void ()*)()
+  call void @unprototyped()
   ret i32 0
 }
 
@@ -17,8 +17,8 @@ define i32 @call_unprototyped() {
 
 declare void @escaped_cast()
 
-define i32 @escape_it_with_cast(i8** %p) {
-  store i8* bitcast (void ()* @escaped_cast to i8*), i8** %p
+define i32 @escape_it_with_cast(ptr %p) {
+  store ptr @escaped_cast, ptr %p
   ret i32 0
 }
 
@@ -28,7 +28,7 @@ declare void @dead_constant()
 !0 = !{i32 2, !"cfguard", i32 1}
 
 !dead_constant_root = !{!1}
-!1 = !DITemplateValueParameter(name: "dead_constant", value: i8* bitcast (void ()* @dead_constant to i8*))
+!1 = !DITemplateValueParameter(name: "dead_constant", value: ptr @dead_constant)
 
 ; CHECK-LABEL: .section .gfids$y,"dr"
 ; CHECK-NEXT:  .symidx escaped_cast
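
In cfguard-cast.ll above, the constant pointer casts have nothing left to convert between once every pointer is ptr, so they fold away: the call through bitcast (void (...)* @unprototyped to void ()*) becomes a plain direct call, and the escaping store keeps only the bare function symbol. A rough before/after sketch of that pattern, with hypothetical @callee/@sink names rather than the test's own:

  ; Typed pointers: casts reconcile the differing function pointer types
  declare void @callee(...)
  define void @caller(i8** %sink) {
    call void bitcast (void (...)* @callee to void ()*)()
    store i8* bitcast (void (...)* @callee to i8*), i8** %sink
    ret void
  }

  ; Opaque pointers: the call carries its own function type, so the casts vanish
  declare void @callee(...)
  define void @caller(ptr %sink) {
    call void @callee()
    store ptr @callee, ptr %sink
    ret void
  }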

diff  --git a/llvm/test/CodeGen/WinCFGuard/cfguard-giats.ll b/llvm/test/CodeGen/WinCFGuard/cfguard-giats.ll
index 655e30417845d..461459fc6f486 100644
--- a/llvm/test/CodeGen/WinCFGuard/cfguard-giats.ll
+++ b/llvm/test/CodeGen/WinCFGuard/cfguard-giats.ll
@@ -5,7 +5,7 @@
 declare dllimport i32 @target_func1()
 declare dllimport i32 @target_func2()
 declare dllimport i32 @target_func3()
-@ptrs = dso_local local_unnamed_addr global [2 x void ()*] [void ()* bitcast (i32 ()* @target_func2 to void ()*), void ()* bitcast (i32 ()* @target_func3 to void ()*)], align 16
+@ptrs = dso_local local_unnamed_addr global [2 x ptr] [ptr @target_func2, ptr @target_func3], align 16
 
 ; Test address-taken functions from imported DLLs are correctly added to the 
 ; Guard Address-Taken IAT Entry (.giats) and Guard Function ID (.gfids) sections.
@@ -14,19 +14,19 @@ entry:
   ; Since it is a dllimport, target_func1 will be represented as "__imp_target_func1" when it is
   ; stored in the function pointer. Therefore, the .giats section must contain "__imp_target_func1".
   ; Unlike MSVC, we also have "target_func1" in the .gfids section, since this is not a security risk.
-  %func_ptr = alloca i32 ()*, align 8
-  store i32 ()* @target_func1, i32 ()** %func_ptr, align 8
-  %0 = load i32 ()*, i32 ()** %func_ptr, align 8
+  %func_ptr = alloca ptr, align 8
+  store ptr @target_func1, ptr %func_ptr, align 8
+  %0 = load ptr, ptr %func_ptr, align 8
   %1 = call i32 %0()
   ; target_func2 is called directly from a global array, so should only appear in the .gfids section.
-  %2 = load i32 ()*, i32 ()** bitcast ([2 x void ()*]* @ptrs to i32 ()**), align 8
+  %2 = load ptr, ptr @ptrs, align 8
   %3 = call i32 %2()
   ; target_func3 is called both via a stored function pointer (as with target_func1) and via a global
   ; array (as with target_func2), so "target_func3" must appear in .gfids and "__imp_target_func3" in .giats.
-  store i32 ()* @target_func3, i32 ()** %func_ptr, align 8
-  %4 = load i32 ()*, i32 ()** %func_ptr, align 8
+  store ptr @target_func3, ptr %func_ptr, align 8
+  %4 = load ptr, ptr %func_ptr, align 8
   %5 = call i32 %4()
-  %6 = load i32 ()*, i32 ()** bitcast (void ()** getelementptr inbounds ([2 x void ()*], [2 x void ()*]* @ptrs, i64 0, i64 1) to i32 ()**), align 8
+  %6 = load ptr, ptr getelementptr inbounds ([2 x ptr], ptr @ptrs, i64 0, i64 1), align 8
   %7 = call i32 %6()
   ret i32 %5
 }
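
The loads from @ptrs above also illustrate how constant expressions simplify: a bitcast of a global to another pointer type is now just the global itself, while a getelementptr constant stays because the element offset still matters (spelled with an explicit source element type and a ptr base). A small sketch of the same pattern, using a hypothetical @table global rather than the test's @ptrs:

  @table = global [2 x ptr] zeroinitializer

  define void @use_table() {
    ; element 0: the old "bitcast to another pointer type" wrapper is gone
    %first = load volatile ptr, ptr @table, align 8
    ; element 1: the offset is still needed, so the constant GEP remains
    %second = load volatile ptr, ptr getelementptr inbounds ([2 x ptr], ptr @table, i64 0, i64 1), align 8
    ret void
  }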

diff  --git a/llvm/test/CodeGen/WinCFGuard/cfguard.ll b/llvm/test/CodeGen/WinCFGuard/cfguard.ll
index 3eb0e3f788d53..2ec2e573f7164 100644
--- a/llvm/test/CodeGen/WinCFGuard/cfguard.ll
+++ b/llvm/test/CodeGen/WinCFGuard/cfguard.ll
@@ -13,12 +13,12 @@ target datalayout = "e-m:w-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-pc-windows-msvc"
 
 %struct.Derived = type { %struct.Base }
-%struct.Base = type { i32 (...)** }
+%struct.Base = type { ptr }
 %rtti.CompleteObjectLocator = type { i32, i32, i32, i32, i32, i32 }
-%rtti.TypeDescriptor13 = type { i8**, i8*, [14 x i8] }
+%rtti.TypeDescriptor13 = type { ptr, ptr, [14 x i8] }
 %rtti.ClassHierarchyDescriptor = type { i32, i32, i32, i32 }
 %rtti.BaseClassDescriptor = type { i32, i32, i32, i32, i32, i32, i32 }
-%rtti.TypeDescriptor10 = type { i8**, i8*, [11 x i8] }
+%rtti.TypeDescriptor10 = type { ptr, ptr, [11 x i8] }
 
 $"\01??0Derived@@QEAA at XZ" = comdat any
 
@@ -51,43 +51,43 @@ $"\01??_7Base@@6B@" = comdat largest
 $"\01??_R4Base@@6B@" = comdat any
 
 @"\01?D@@3UDerived@@A" = global %struct.Derived zeroinitializer, align 8
-@0 = private unnamed_addr constant { [2 x i8*] } { [2 x i8*] [i8* bitcast (%rtti.CompleteObjectLocator* @"\01??_R4Derived@@6B@" to i8*), i8* bitcast (i32 (%struct.Derived*)* @"\01?virt_method@Derived@@UEBAHXZ" to i8*)] }, comdat($"\01??_7Derived@@6B@")
-@"\01??_R4Derived@@6B@" = linkonce_odr constant %rtti.CompleteObjectLocator { i32 1, i32 0, i32 0, i32 trunc (i64 sub nuw nsw (i64 ptrtoint (%rtti.TypeDescriptor13* @"\01??_R0?AUDerived@@@8" to i64), i64 ptrtoint (i8* @__ImageBase to i64)) to i32), i32 trunc (i64 sub nuw nsw (i64 ptrtoint (%rtti.ClassHierarchyDescriptor* @"\01??_R3Derived@@8" to i64), i64 ptrtoint (i8* @__ImageBase to i64)) to i32), i32 trunc (i64 sub nuw nsw (i64 ptrtoint (%rtti.CompleteObjectLocator* @"\01??_R4Derived@@6B@" to i64), i64 ptrtoint (i8* @__ImageBase to i64)) to i32) }, comdat
-@"\01??_7type_info@@6B@" = external constant i8*
-@"\01??_R0?AUDerived@@@8" = linkonce_odr global %rtti.TypeDescriptor13 { i8** @"\01??_7type_info@@6B@", i8* null, [14 x i8] c".?AUDerived@@\00" }, comdat
+@0 = private unnamed_addr constant { [2 x ptr] } { [2 x ptr] [ptr @"\01??_R4Derived@@6B@", ptr @"\01?virt_method@Derived@@UEBAHXZ"] }, comdat($"\01??_7Derived@@6B@")
+@"\01??_R4Derived@@6B@" = linkonce_odr constant %rtti.CompleteObjectLocator { i32 1, i32 0, i32 0, i32 trunc (i64 sub nuw nsw (i64 ptrtoint (ptr @"\01??_R0?AUDerived@@@8" to i64), i64 ptrtoint (ptr @__ImageBase to i64)) to i32), i32 trunc (i64 sub nuw nsw (i64 ptrtoint (ptr @"\01??_R3Derived@@8" to i64), i64 ptrtoint (ptr @__ImageBase to i64)) to i32), i32 trunc (i64 sub nuw nsw (i64 ptrtoint (ptr @"\01??_R4Derived@@6B@" to i64), i64 ptrtoint (ptr @__ImageBase to i64)) to i32) }, comdat
+@"\01??_7type_info@@6B@" = external constant ptr
+@"\01??_R0?AUDerived@@@8" = linkonce_odr global %rtti.TypeDescriptor13 { ptr @"\01??_7type_info@@6B@", ptr null, [14 x i8] c".?AUDerived@@\00" }, comdat
 @__ImageBase = external constant i8
-@"\01??_R3Derived@@8" = linkonce_odr constant %rtti.ClassHierarchyDescriptor { i32 0, i32 0, i32 2, i32 trunc (i64 sub nuw nsw (i64 ptrtoint ([3 x i32]* @"\01??_R2Derived@@8" to i64), i64 ptrtoint (i8* @__ImageBase to i64)) to i32) }, comdat
-@"\01??_R2Derived@@8" = linkonce_odr constant [3 x i32] [i32 trunc (i64 sub nuw nsw (i64 ptrtoint (%rtti.BaseClassDescriptor* @"\01??_R1A@?0A at EA@Derived@@8" to i64), i64 ptrtoint (i8* @__ImageBase to i64)) to i32), i32 trunc (i64 sub nuw nsw (i64 ptrtoint (%rtti.BaseClassDescriptor* @"\01??_R1A@?0A at EA@Base@@8" to i64), i64 ptrtoint (i8* @__ImageBase to i64)) to i32), i32 0], comdat
-@"\01??_R1A@?0A at EA@Derived@@8" = linkonce_odr constant %rtti.BaseClassDescriptor { i32 trunc (i64 sub nuw nsw (i64 ptrtoint (%rtti.TypeDescriptor13* @"\01??_R0?AUDerived@@@8" to i64), i64 ptrtoint (i8* @__ImageBase to i64)) to i32), i32 1, i32 0, i32 -1, i32 0, i32 64, i32 trunc (i64 sub nuw nsw (i64 ptrtoint (%rtti.ClassHierarchyDescriptor* @"\01??_R3Derived@@8" to i64), i64 ptrtoint (i8* @__ImageBase to i64)) to i32) }, comdat
-@"\01??_R1A@?0A at EA@Base@@8" = linkonce_odr constant %rtti.BaseClassDescriptor { i32 trunc (i64 sub nuw nsw (i64 ptrtoint (%rtti.TypeDescriptor10* @"\01??_R0?AUBase@@@8" to i64), i64 ptrtoint (i8* @__ImageBase to i64)) to i32), i32 0, i32 0, i32 -1, i32 0, i32 64, i32 trunc (i64 sub nuw nsw (i64 ptrtoint (%rtti.ClassHierarchyDescriptor* @"\01??_R3Base@@8" to i64), i64 ptrtoint (i8* @__ImageBase to i64)) to i32) }, comdat
-@"\01??_R0?AUBase@@@8" = linkonce_odr global %rtti.TypeDescriptor10 { i8** @"\01??_7type_info@@6B@", i8* null, [11 x i8] c".?AUBase@@\00" }, comdat
-@"\01??_R3Base@@8" = linkonce_odr constant %rtti.ClassHierarchyDescriptor { i32 0, i32 0, i32 1, i32 trunc (i64 sub nuw nsw (i64 ptrtoint ([2 x i32]* @"\01??_R2Base@@8" to i64), i64 ptrtoint (i8* @__ImageBase to i64)) to i32) }, comdat
-@"\01??_R2Base@@8" = linkonce_odr constant [2 x i32] [i32 trunc (i64 sub nuw nsw (i64 ptrtoint (%rtti.BaseClassDescriptor* @"\01??_R1A@?0A at EA@Base@@8" to i64), i64 ptrtoint (i8* @__ImageBase to i64)) to i32), i32 0], comdat
- at 1 = private unnamed_addr constant { [2 x i8*] } { [2 x i8*] [i8* bitcast (%rtti.CompleteObjectLocator* @"\01??_R4Base@@6B@" to i8*), i8* bitcast (void ()* @_purecall to i8*)] }, comdat($"\01??_7Base@@6B@")
-@"\01??_R4Base@@6B@" = linkonce_odr constant %rtti.CompleteObjectLocator { i32 1, i32 0, i32 0, i32 trunc (i64 sub nuw nsw (i64 ptrtoint (%rtti.TypeDescriptor10* @"\01??_R0?AUBase@@@8" to i64), i64 ptrtoint (i8* @__ImageBase to i64)) to i32), i32 trunc (i64 sub nuw nsw (i64 ptrtoint (%rtti.ClassHierarchyDescriptor* @"\01??_R3Base@@8" to i64), i64 ptrtoint (i8* @__ImageBase to i64)) to i32), i32 trunc (i64 sub nuw nsw (i64 ptrtoint (%rtti.CompleteObjectLocator* @"\01??_R4Base@@6B@" to i64), i64 ptrtoint (i8* @__ImageBase to i64)) to i32) }, comdat
-@llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 65535, void ()* @_GLOBAL__sub_I_cfguard.cpp, i8* null }]
-
-@"\01??_7Derived@@6B@" = unnamed_addr alias i8*, getelementptr inbounds ({ [2 x i8*] }, { [2 x i8*] }* @0, i32 0, i32 0, i32 1)
-@"\01??_7Base@@6B@" = unnamed_addr alias i8*, getelementptr inbounds ({ [2 x i8*] }, { [2 x i8*] }* @1, i32 0, i32 0, i32 1)
+@"\01??_R3Derived@@8" = linkonce_odr constant %rtti.ClassHierarchyDescriptor { i32 0, i32 0, i32 2, i32 trunc (i64 sub nuw nsw (i64 ptrtoint (ptr @"\01??_R2Derived@@8" to i64), i64 ptrtoint (ptr @__ImageBase to i64)) to i32) }, comdat
+@"\01??_R2Derived@@8" = linkonce_odr constant [3 x i32] [i32 trunc (i64 sub nuw nsw (i64 ptrtoint (ptr @"\01??_R1A@?0A at EA@Derived@@8" to i64), i64 ptrtoint (ptr @__ImageBase to i64)) to i32), i32 trunc (i64 sub nuw nsw (i64 ptrtoint (ptr @"\01??_R1A@?0A at EA@Base@@8" to i64), i64 ptrtoint (ptr @__ImageBase to i64)) to i32), i32 0], comdat
+@"\01??_R1A@?0A at EA@Derived@@8" = linkonce_odr constant %rtti.BaseClassDescriptor { i32 trunc (i64 sub nuw nsw (i64 ptrtoint (ptr @"\01??_R0?AUDerived@@@8" to i64), i64 ptrtoint (ptr @__ImageBase to i64)) to i32), i32 1, i32 0, i32 -1, i32 0, i32 64, i32 trunc (i64 sub nuw nsw (i64 ptrtoint (ptr @"\01??_R3Derived@@8" to i64), i64 ptrtoint (ptr @__ImageBase to i64)) to i32) }, comdat
+@"\01??_R1A@?0A at EA@Base@@8" = linkonce_odr constant %rtti.BaseClassDescriptor { i32 trunc (i64 sub nuw nsw (i64 ptrtoint (ptr @"\01??_R0?AUBase@@@8" to i64), i64 ptrtoint (ptr @__ImageBase to i64)) to i32), i32 0, i32 0, i32 -1, i32 0, i32 64, i32 trunc (i64 sub nuw nsw (i64 ptrtoint (ptr @"\01??_R3Base@@8" to i64), i64 ptrtoint (ptr @__ImageBase to i64)) to i32) }, comdat
+@"\01??_R0?AUBase@@@8" = linkonce_odr global %rtti.TypeDescriptor10 { ptr @"\01??_7type_info@@6B@", ptr null, [11 x i8] c".?AUBase@@\00" }, comdat
+@"\01??_R3Base@@8" = linkonce_odr constant %rtti.ClassHierarchyDescriptor { i32 0, i32 0, i32 1, i32 trunc (i64 sub nuw nsw (i64 ptrtoint (ptr @"\01??_R2Base@@8" to i64), i64 ptrtoint (ptr @__ImageBase to i64)) to i32) }, comdat
+@"\01??_R2Base@@8" = linkonce_odr constant [2 x i32] [i32 trunc (i64 sub nuw nsw (i64 ptrtoint (ptr @"\01??_R1A@?0A at EA@Base@@8" to i64), i64 ptrtoint (ptr @__ImageBase to i64)) to i32), i32 0], comdat
+ at 1 = private unnamed_addr constant { [2 x ptr] } { [2 x ptr] [ptr @"\01??_R4Base@@6B@", ptr @_purecall] }, comdat($"\01??_7Base@@6B@")
+@"\01??_R4Base@@6B@" = linkonce_odr constant %rtti.CompleteObjectLocator { i32 1, i32 0, i32 0, i32 trunc (i64 sub nuw nsw (i64 ptrtoint (ptr @"\01??_R0?AUBase@@@8" to i64), i64 ptrtoint (ptr @__ImageBase to i64)) to i32), i32 trunc (i64 sub nuw nsw (i64 ptrtoint (ptr @"\01??_R3Base@@8" to i64), i64 ptrtoint (ptr @__ImageBase to i64)) to i32), i32 trunc (i64 sub nuw nsw (i64 ptrtoint (ptr @"\01??_R4Base@@6B@" to i64), i64 ptrtoint (ptr @__ImageBase to i64)) to i32) }, comdat
+@llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65535, ptr @_GLOBAL__sub_I_cfguard.cpp, ptr null }]
+
+@"\01??_7Derived@@6B@" = unnamed_addr alias ptr, getelementptr inbounds ({ [2 x ptr] }, ptr @0, i32 0, i32 0, i32 1)
+@"\01??_7Base@@6B@" = unnamed_addr alias ptr, getelementptr inbounds ({ [2 x ptr] }, ptr @1, i32 0, i32 0, i32 1)
 
 ; Function Attrs: noinline nounwind
 define internal void @"\01??__ED@@YAXXZ"() #0 {
 entry:
-  %call = call %struct.Derived* @"\01??0Derived@@QEAA@XZ"(%struct.Derived* @"\01?D@@3UDerived@@A") #2
+  %call = call ptr @"\01??0Derived@@QEAA@XZ"(ptr @"\01?D@@3UDerived@@A") #2
   ret void
 }
 
 ; Function Attrs: noinline nounwind optnone
-define linkonce_odr %struct.Derived* @"\01??0Derived@@QEAA@XZ"(%struct.Derived* returned %this) unnamed_addr #1 comdat align 2 {
+define linkonce_odr ptr @"\01??0Derived@@QEAA@XZ"(ptr returned %this) unnamed_addr #1 comdat align 2 {
 entry:
-  %this.addr = alloca %struct.Derived*, align 8
-  store %struct.Derived* %this, %struct.Derived** %this.addr, align 8
-  %this1 = load %struct.Derived*, %struct.Derived** %this.addr, align 8
-  %0 = bitcast %struct.Derived* %this1 to %struct.Base*
-  %call = call %struct.Base* @"\01??0Base@@QEAA@XZ"(%struct.Base* %0) #2
-  %1 = bitcast %struct.Derived* %this1 to i32 (...)***
-  store i32 (...)** bitcast (i8** @"\01??_7Derived@@6B@" to i32 (...)**), i32 (...)*** %1, align 8
-  ret %struct.Derived* %this1
+  %this.addr = alloca ptr, align 8
+  store ptr %this, ptr %this.addr, align 8
+  %this1 = load ptr, ptr %this.addr, align 8
+  %0 = bitcast ptr %this1 to ptr
+  %call = call ptr @"\01??0Base@@QEAA at XZ"(ptr %0) #2
+  %1 = bitcast ptr %this1 to ptr
+  store ptr @"\01??_7Derived@@6B@", ptr %1, align 8
+  ret ptr %this1
 }
 
 ; Function Attrs: noinline nounwind optnone
@@ -97,50 +97,50 @@ entry:
 }
 
 ; Function Attrs: noinline nounwind optnone
-define void ()* @"\01?foo@@YAP6AXXZPEAUBase@@@Z"(%struct.Base* %B) #1 {
+define ptr @"\01?foo@@YAP6AXXZPEAUBase@@@Z"(ptr %B) #1 {
 entry:
-  %retval = alloca void ()*, align 8
-  %B.addr = alloca %struct.Base*, align 8
-  store %struct.Base* %B, %struct.Base** %B.addr, align 8
-  %0 = load %struct.Base*, %struct.Base** %B.addr, align 8
-  %1 = bitcast %struct.Base* %0 to i32 (%struct.Base*)***
-  %vtable = load i32 (%struct.Base*)**, i32 (%struct.Base*)*** %1, align 8
-  %vfn = getelementptr inbounds i32 (%struct.Base*)*, i32 (%struct.Base*)** %vtable, i64 0
-  %2 = load i32 (%struct.Base*)*, i32 (%struct.Base*)** %vfn, align 8
-  %call = call i32 %2(%struct.Base* %0)
+  %retval = alloca ptr, align 8
+  %B.addr = alloca ptr, align 8
+  store ptr %B, ptr %B.addr, align 8
+  %0 = load ptr, ptr %B.addr, align 8
+  %1 = bitcast ptr %0 to ptr
+  %vtable = load ptr, ptr %1, align 8
+  %vfn = getelementptr inbounds ptr, ptr %vtable, i64 0
+  %2 = load ptr, ptr %vfn, align 8
+  %call = call i32 %2(ptr %0)
   %tobool = icmp ne i32 %call, 0
   br i1 %tobool, label %if.then, label %if.end
 
 if.then:                                          ; preds = %entry
-  store void ()* @"\01?address_taken@@YAXXZ", void ()** %retval, align 8
+  store ptr @"\01?address_taken@@YAXXZ", ptr %retval, align 8
   br label %return
 
 if.end:                                           ; preds = %entry
-  store void ()* null, void ()** %retval, align 8
+  store ptr null, ptr %retval, align 8
   br label %return
 
 return:                                           ; preds = %if.end, %if.then
-  %3 = load void ()*, void ()** %retval, align 8
-  ret void ()* %3
+  %3 = load ptr, ptr %retval, align 8
+  ret ptr %3
 }
 
 ; Function Attrs: noinline nounwind optnone
-define linkonce_odr %struct.Base* @"\01??0Base@@QEAA@XZ"(%struct.Base* returned %this) unnamed_addr #1 comdat align 2 {
+define linkonce_odr ptr @"\01??0Base@@QEAA@XZ"(ptr returned %this) unnamed_addr #1 comdat align 2 {
 entry:
-  %this.addr = alloca %struct.Base*, align 8
-  store %struct.Base* %this, %struct.Base** %this.addr, align 8
-  %this1 = load %struct.Base*, %struct.Base** %this.addr, align 8
-  %0 = bitcast %struct.Base* %this1 to i32 (...)***
-  store i32 (...)** bitcast (i8** @"\01??_7Base@@6B@" to i32 (...)**), i32 (...)*** %0, align 8
-  ret %struct.Base* %this1
+  %this.addr = alloca ptr, align 8
+  store ptr %this, ptr %this.addr, align 8
+  %this1 = load ptr, ptr %this.addr, align 8
+  %0 = bitcast ptr %this1 to ptr
+  store ptr @"\01??_7Base@@6B@", ptr %0, align 8
+  ret ptr %this1
 }
 
 ; Function Attrs: noinline nounwind optnone
-define linkonce_odr i32 @"\01?virt_method@Derived@@UEBAHXZ"(%struct.Derived* %this) unnamed_addr #1 comdat align 2 {
+define linkonce_odr i32 @"\01?virt_method@Derived@@UEBAHXZ"(ptr %this) unnamed_addr #1 comdat align 2 {
 entry:
-  %this.addr = alloca %struct.Derived*, align 8
-  store %struct.Derived* %this, %struct.Derived** %this.addr, align 8
-  %this1 = load %struct.Derived*, %struct.Derived** %this.addr, align 8
+  %this.addr = alloca ptr, align 8
+  store ptr %this, ptr %this.addr, align 8
+  %this1 = load ptr, ptr %this.addr, align 8
   ret i32 42
 }
 

diff  --git a/llvm/test/CodeGen/XCore/threads.ll b/llvm/test/CodeGen/XCore/threads.ll
index c24c6568ab27d..40bcf88e31831 100644
--- a/llvm/test/CodeGen/XCore/threads.ll
+++ b/llvm/test/CodeGen/XCore/threads.ll
@@ -1,20 +1,20 @@
 ; RUN: llc -mtriple=xcore-unknown-unknown < %s | FileCheck %s
 ; RUN: llc -mtriple=xcore-unknown-unknown -O=0 < %s | FileCheck %s -check-prefix=PHINODE
 
-declare ptr addrspace(1) @llvm.xcore.getst.p1i8.p1i8(ptr addrspace(1) %r)
-declare void @llvm.xcore.msync.p1i8(ptr addrspace(1) %r)
+declare ptr addrspace(1) @llvm.xcore.getst.p1.p1(ptr addrspace(1) %r)
+declare void @llvm.xcore.msync.p1(ptr addrspace(1) %r)
 declare void @llvm.xcore.ssync()
-declare void @llvm.xcore.mjoin.p1i8(ptr addrspace(1) %r)
-declare void @llvm.xcore.initsp.p1i8(ptr addrspace(1) %r, ptr %value)
-declare void @llvm.xcore.initpc.p1i8(ptr addrspace(1) %r, ptr %value)
-declare void @llvm.xcore.initlr.p1i8(ptr addrspace(1) %r, ptr %value)
-declare void @llvm.xcore.initcp.p1i8(ptr addrspace(1) %r, ptr %value)
-declare void @llvm.xcore.initdp.p1i8(ptr addrspace(1) %r, ptr %value)
+declare void @llvm.xcore.mjoin.p1(ptr addrspace(1) %r)
+declare void @llvm.xcore.initsp.p1(ptr addrspace(1) %r, ptr %value)
+declare void @llvm.xcore.initpc.p1(ptr addrspace(1) %r, ptr %value)
+declare void @llvm.xcore.initlr.p1(ptr addrspace(1) %r, ptr %value)
+declare void @llvm.xcore.initcp.p1(ptr addrspace(1) %r, ptr %value)
+declare void @llvm.xcore.initdp.p1(ptr addrspace(1) %r, ptr %value)
 
 define ptr addrspace(1) @test_getst(ptr addrspace(1) %r) {
 ; CHECK-LABEL: test_getst:
 ; CHECK: getst r0, res[r0]
-  %result = call ptr addrspace(1) @llvm.xcore.getst.p1i8.p1i8(ptr addrspace(1) %r)
+  %result = call ptr addrspace(1) @llvm.xcore.getst.p1.p1(ptr addrspace(1) %r)
   ret ptr addrspace(1) %result
 }
 
@@ -28,42 +28,42 @@ define void @test_ssync() {
 define void @test_mjoin(ptr addrspace(1) %r) {
 ; CHECK-LABEL: test_mjoin:
 ; CHECK: mjoin res[r0]
-  call void @llvm.xcore.mjoin.p1i8(ptr addrspace(1) %r)
+  call void @llvm.xcore.mjoin.p1(ptr addrspace(1) %r)
   ret void
 }
 
 define void @test_initsp(ptr addrspace(1) %t, ptr %src) {
 ; CHECK-LABEL: test_initsp:
 ; CHECK: init t[r0]:sp, r1
-  call void @llvm.xcore.initsp.p1i8(ptr addrspace(1) %t, ptr %src)
+  call void @llvm.xcore.initsp.p1(ptr addrspace(1) %t, ptr %src)
   ret void
 }
 
 define void @test_initpc(ptr addrspace(1) %t, ptr %src) {
 ; CHECK-LABEL: test_initpc:
 ; CHECK: init t[r0]:pc, r1
-  call void @llvm.xcore.initpc.p1i8(ptr addrspace(1) %t, ptr %src)
+  call void @llvm.xcore.initpc.p1(ptr addrspace(1) %t, ptr %src)
   ret void
 }
 
 define void @test_initlr(ptr addrspace(1) %t, ptr %src) {
 ; CHECK-LABEL: test_initlr:
 ; CHECK: init t[r0]:lr, r1
-  call void @llvm.xcore.initlr.p1i8(ptr addrspace(1) %t, ptr %src)
+  call void @llvm.xcore.initlr.p1(ptr addrspace(1) %t, ptr %src)
   ret void
 }
 
 define void @test_initcp(ptr addrspace(1) %t, ptr %src) {
 ; CHECK-LABEL: test_initcp:
 ; CHECK: init t[r0]:cp, r1
-  call void @llvm.xcore.initcp.p1i8(ptr addrspace(1) %t, ptr %src)
+  call void @llvm.xcore.initcp.p1(ptr addrspace(1) %t, ptr %src)
   ret void
 }
 
 define void @test_initdp(ptr addrspace(1) %t, ptr %src) {
 ; CHECK-LABEL: test_initdp:
 ; CHECK: init t[r0]:dp, r1
-  call void @llvm.xcore.initdp.p1i8(ptr addrspace(1) %t, ptr %src)
+  call void @llvm.xcore.initdp.p1(ptr addrspace(1) %t, ptr %src)
   ret void
 }
 
@@ -88,7 +88,7 @@ define ptr @f_tle() {
 ; CHECK: ldaw [[R1:r[0-9]]], dp[tle]
 ; r0 = &tl + id*8
 ; CHECK: add r0, [[R1]], [[R0]]
-  ret ptr getelementptr inbounds ([2 x i32], ptr @tle, i32 0, i32 0)
+  ret ptr @tle
 }
 
 define i32 @f_tlExpr () {
@@ -99,8 +99,8 @@ define i32 @f_tlExpr () {
 ; CHECK: add [[R2:r[0-9]]], [[R1]], [[R0]]
 ; CHECK: add r0, [[R2]], [[R2]]
   ret i32 add(
-      i32 ptrtoint( ptr getelementptr inbounds ([2 x i32], ptr @tle, i32 0, i32 0) to i32),
-      i32 ptrtoint( ptr getelementptr inbounds ([2 x i32], ptr @tle, i32 0, i32 0) to i32))
+      i32 ptrtoint( ptr @tle to i32),
+      i32 ptrtoint( ptr @tle to i32))
 }
 
 define void @phiNode1() {


        

